/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_set(v, 0);

	return -EINVAL;
}
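/*
 * Usage sketch (illustration only, not called by the driver): the two
 * helpers above form a saturating counter.  An increment is refused
 * while the counter sits at 0 (the old value, 0, comes back), overflow
 * and underflow both report -EINVAL, and an underflow pins the counter
 * back at 0 so the error state is sticky.
 */
static void __maybe_unused atomic_safe_counter_example(atomic_t *v)
{
	atomic_set(v, 1);			/* one live reference */
	if (atomic_inc_return_safe(v) == 2)	/* 1 -> 2 */
		WARN_ON(atomic_dec_return_safe(v) != 1);	/* 2 -> 1 */
}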
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
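/*
 * A minimal usage sketch (illustrative, not called anywhere): walk an
 * image request's object requests in submission order and total the
 * bytes they cover.
 */
static u64 __maybe_unused rbd_img_request_bytes(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u64 bytes = 0;

	for_each_obj_request(img_request, obj_request)
		bytes += obj_request->length;

	return bytes;
}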
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	struct list_head	rq_queue;	/* incoming rq queue */
	spinlock_t		lock;		/* queue, flags, open_count */
	struct workqueue_struct	*rq_wq;
	struct work_struct	rq_work;

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
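/*
 * Worked example (sketch): with RBD_SINGLE_MAJOR_PART_SHIFT == 4 each
 * device owns a block of 16 minors, so dev_id 2 starts at minor 32 and
 * minors 32..47 (whole device plus partitions) all map back to dev_id 2.
 */
static bool __maybe_unused rbd_dev_id_minor_roundtrip(int dev_id)
{
	return minor_to_rbd_dev_id(rbd_dev_id_to_minor(dev_id)) == dev_id;
}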
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we cannot figure out what to report */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
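/*
 * Example (illustrative sketch): when an image is mapped with "-o ro"
 * through the userspace rbd tool, the option token handed to this
 * parser is "ro", which matches Opt_read_only above and flips
 * rbd_opts->read_only to true; "rw" does the reverse.
 */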
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to unlink
 * the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
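/*
 * Worked example (sketch): for a snapshot array { 30, 20, 10 } --
 * descending, as the osd keeps it -- snapid_compare_reverse() steers
 * the bsearch() so that id 20 is found at index 1, while a lookup of
 * id 25 returns BAD_SNAP_INDEX.
 */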
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
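/*
 * Worked example (sketch, assuming obj_order == 22, i.e. 4 MiB
 * objects): image byte offset 10 MiB falls 2 MiB into segment 2, and
 * a 4 MiB I/O starting there is clipped to the 2 MiB remaining in
 * that segment.
 */
static void __maybe_unused rbd_segment_example(struct rbd_device *rbd_dev)
{
	u64 offset = 10ULL << 20;

	WARN_ON(rbd_segment_offset(rbd_dev, offset) != 2ULL << 20);
	WARN_ON(rbd_segment_length(rbd_dev, offset, 4ULL << 20) != 2ULL << 20);
}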
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
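/*
 * Worked example (sketch): with 4 MiB objects and a parent overlap of
 * 5 MiB, the overlap is rounded up to a whole object boundary (8 MiB).
 * An object request at img_offset 6 MiB therefore still counts as
 * overlapping the parent, because the object it lives in straddles the
 * 5 MiB boundary even though byte 6 MiB itself lies past the overlap.
 */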
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, " result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
2282 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2284 struct rbd_img_request *img_request;
2285 u32 which = obj_request->which;
2288 rbd_assert(obj_request_img_data_test(obj_request));
2289 img_request = obj_request->img_request;
2291 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2292 rbd_assert(img_request != NULL);
2293 rbd_assert(img_request->obj_request_count > 0);
2294 rbd_assert(which != BAD_WHICH);
2295 rbd_assert(which < img_request->obj_request_count);
2297 spin_lock_irq(&img_request->completion_lock);
2298 if (which != img_request->next_completion)
2301 for_each_obj_request_from(img_request, obj_request) {
2303 rbd_assert(which < img_request->obj_request_count);
2305 if (!obj_request_done_test(obj_request))
2307 more = rbd_img_obj_end_request(obj_request);
2311 rbd_assert(more ^ (which == img_request->obj_request_count));
2312 img_request->next_completion = which;
2314 spin_unlock_irq(&img_request->completion_lock);
2315 rbd_img_request_put(img_request);
2318 rbd_img_request_complete(img_request);
2322 * Add individual osd ops to the given ceph_osd_request and prepare
2323 * them for submission. num_ops is the current number of
2324 * osd operations already assigned to the object request.
2326 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2327 struct ceph_osd_request *osd_request,
2328 enum obj_operation_type op_type,
2329 unsigned int num_ops)
2331 struct rbd_img_request *img_request = obj_request->img_request;
2332 struct rbd_device *rbd_dev = img_request->rbd_dev;
2333 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2334 u64 offset = obj_request->offset;
2335 u64 length = obj_request->length;
2339 if (op_type == OBJ_OP_DISCARD) {
2340 if (!offset && (length == object_size)
2341 && (!img_request_layered_test(img_request) ||
2342 (rbd_dev->parent_overlap <=
2343 obj_request->img_offset))) {
2344 opcode = CEPH_OSD_OP_DELETE;
2345 } else if (offset + length == object_size) {
2346 opcode = CEPH_OSD_OP_TRUNCATE;
2348 down_read(&rbd_dev->header_rwsem);
2349 img_end = rbd_dev->header.image_size;
2350 up_read(&rbd_dev->header_rwsem);
2352 if (obj_request->img_offset + length == img_end)
2353 opcode = CEPH_OSD_OP_TRUNCATE;
2355 opcode = CEPH_OSD_OP_ZERO;
2357 } else if (op_type == OBJ_OP_WRITE) {
2358 opcode = CEPH_OSD_OP_WRITE;
2359 osd_req_op_alloc_hint_init(osd_request, num_ops,
2360 object_size, object_size);
2363 opcode = CEPH_OSD_OP_READ;
2366 osd_req_op_extent_init(osd_request, num_ops, opcode, offset, length,
2368 if (obj_request->type == OBJ_REQUEST_BIO)
2369 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2370 obj_request->bio_list, length);
2371 else if (obj_request->type == OBJ_REQUEST_PAGES)
2372 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2373 obj_request->pages, length,
2374 offset & ~PAGE_MASK, false, false);
2376 /* Discards are also writes */
2377 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2378 rbd_osd_req_format_write(obj_request);
2380 rbd_osd_req_format_read(obj_request);
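/*
 * Recap of the discard mapping above (an editorial summary, not from the
 * original source), assuming the default 4 MB objects:
 *
 *   - whole object discarded, no parent data beneath it  -> DELETE
 *   - discard runs to the end of the object or the image -> TRUNCATE
 *   - any other discard within the object                -> ZERO
 *
 * Writes additionally get an allocation hint op ahead of the write op,
 * which is why callers reserve two ops for OBJ_OP_WRITE requests.
 */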
2384 * Split up an image request into one or more object requests, each
2385 * to a different object. The "type" parameter indicates whether
2386 * "data_desc" is the pointer to the head of a list of bio
2387 * structures, or the base of a page array. In either case this
2388 * function assumes data_desc describes memory sufficient to hold
2389 * all data described by the image request.
2391 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2392 enum obj_request_type type,
2395 struct rbd_device *rbd_dev = img_request->rbd_dev;
2396 struct rbd_obj_request *obj_request = NULL;
2397 struct rbd_obj_request *next_obj_request;
2398 struct bio *bio_list = NULL;
2399 unsigned int bio_offset = 0;
2400 struct page **pages = NULL;
2401 enum obj_operation_type op_type;
2405 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2406 (int)type, data_desc);
2408 img_offset = img_request->offset;
2409 resid = img_request->length;
2410 rbd_assert(resid > 0);
2411 op_type = rbd_img_request_op_type(img_request);
2413 if (type == OBJ_REQUEST_BIO) {
2414 bio_list = data_desc;
2415 rbd_assert(img_offset ==
2416 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2417 } else if (type == OBJ_REQUEST_PAGES) {
2422 struct ceph_osd_request *osd_req;
2423 const char *object_name;
2427 object_name = rbd_segment_name(rbd_dev, img_offset);
2430 offset = rbd_segment_offset(rbd_dev, img_offset);
2431 length = rbd_segment_length(rbd_dev, img_offset, resid);
2432 obj_request = rbd_obj_request_create(object_name,
2433 offset, length, type);
2434 /* object request has its own copy of the object name */
2435 rbd_segment_name_free(object_name);
2440 * set obj_request->img_request before creating the
2441 * osd_request so that it gets the right snapc
2443 rbd_img_obj_request_add(img_request, obj_request);
2445 if (type == OBJ_REQUEST_BIO) {
2446 unsigned int clone_size;
2448 rbd_assert(length <= (u64)UINT_MAX);
2449 clone_size = (unsigned int)length;
2450 obj_request->bio_list =
2451 bio_chain_clone_range(&bio_list,
2455 if (!obj_request->bio_list)
2457 } else if (type == OBJ_REQUEST_PAGES) {
2458 unsigned int page_count;
2460 obj_request->pages = pages;
2461 page_count = (u32)calc_pages_for(offset, length);
2462 obj_request->page_count = page_count;
2463 if ((offset + length) & ~PAGE_MASK)
2464 page_count--; /* more on last page */
2465 pages += page_count;
2468 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2469 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2474 obj_request->osd_req = osd_req;
2475 obj_request->callback = rbd_img_obj_callback;
2476 obj_request->img_offset = img_offset;
2478 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2480 rbd_img_request_get(img_request);
2482 img_offset += length;
2489 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2490 rbd_img_obj_request_del(img_request, obj_request);
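/*
 * Worked example (illustrative, assuming 4 MB objects): an image request
 * covering offset 7 MB, length 2 MB straddles an object boundary and is
 * split into two object requests:
 *
 *   object 1 (image bytes 4M..8M):  offset 3M, length 1M
 *   object 2 (image bytes 8M..12M): offset 0,  length 1M
 *
 * Each object request clones just its slice of the bio chain (or points
 * into the page array), so no data is copied during the split.
 */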
2496 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2498 struct rbd_img_request *img_request;
2499 struct rbd_device *rbd_dev;
2500 struct page **pages;
2503 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2504 rbd_assert(obj_request_img_data_test(obj_request));
2505 img_request = obj_request->img_request;
2506 rbd_assert(img_request);
2508 rbd_dev = img_request->rbd_dev;
2509 rbd_assert(rbd_dev);
2511 pages = obj_request->copyup_pages;
2512 rbd_assert(pages != NULL);
2513 obj_request->copyup_pages = NULL;
2514 page_count = obj_request->copyup_page_count;
2515 rbd_assert(page_count);
2516 obj_request->copyup_page_count = 0;
2517 ceph_release_page_vector(pages, page_count);
2520 * We want the transfer count to reflect the size of the
2521 * original write request. There is no such thing as a
2522 * successful short write, so if the request was successful
2523 * we can just set it to the originally-requested length.
2525 if (!obj_request->result)
2526 obj_request->xferred = obj_request->length;
2528 /* Finish up with the normal image object callback */
2530 rbd_img_obj_callback(obj_request);
2534 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2536 struct rbd_obj_request *orig_request;
2537 struct ceph_osd_request *osd_req;
2538 struct ceph_osd_client *osdc;
2539 struct rbd_device *rbd_dev;
2540 struct page **pages;
2547 rbd_assert(img_request_child_test(img_request));
2549 /* First get what we need from the image request */
2551 pages = img_request->copyup_pages;
2552 rbd_assert(pages != NULL);
2553 img_request->copyup_pages = NULL;
2554 page_count = img_request->copyup_page_count;
2555 rbd_assert(page_count);
2556 img_request->copyup_page_count = 0;
2558 orig_request = img_request->obj_request;
2559 rbd_assert(orig_request != NULL);
2560 rbd_assert(obj_request_type_valid(orig_request->type));
2561 img_result = img_request->result;
2562 parent_length = img_request->length;
2563 rbd_assert(parent_length == img_request->xferred);
2564 rbd_img_request_put(img_request);
2566 rbd_assert(orig_request->img_request);
2567 rbd_dev = orig_request->img_request->rbd_dev;
2568 rbd_assert(rbd_dev);
2571 * If the overlap has become 0 (most likely because the
2572 * image has been flattened) we need to free the pages
2573 * and re-submit the original write request.
2575 if (!rbd_dev->parent_overlap) {
2576 struct ceph_osd_client *osdc;
2578 ceph_release_page_vector(pages, page_count);
2579 osdc = &rbd_dev->rbd_client->client->osdc;
2580 img_result = rbd_obj_request_submit(osdc, orig_request);
2589 * The original osd request is of no use to us any more.
2590 * We need a new one that can hold the three ops in a copyup
2591 * request. Allocate the new copyup osd request for the
2592 * original request, and release the old one.
2594 img_result = -ENOMEM;
2595 osd_req = rbd_osd_req_create_copyup(orig_request);
2598 rbd_osd_req_destroy(orig_request->osd_req);
2599 orig_request->osd_req = osd_req;
2600 orig_request->copyup_pages = pages;
2601 orig_request->copyup_page_count = page_count;
2603 /* Initialize the copyup op */
2605 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2606 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2609 /* Then the hint op */
2611 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2612 rbd_obj_bytes(&rbd_dev->header));
2614 /* And the original write request op */
2616 offset = orig_request->offset;
2617 length = orig_request->length;
2618 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2619 offset, length, 0, 0);
2620 if (orig_request->type == OBJ_REQUEST_BIO)
2621 osd_req_op_extent_osd_data_bio(osd_req, 2,
2622 orig_request->bio_list, length);
2624 osd_req_op_extent_osd_data_pages(osd_req, 2,
2625 orig_request->pages, length,
2626 offset & ~PAGE_MASK, false, false);
2628 rbd_osd_req_format_write(orig_request);
2630 /* All set, send it off. */
2632 orig_request->callback = rbd_img_obj_copyup_callback;
2633 osdc = &rbd_dev->rbd_client->client->osdc;
2634 img_result = rbd_obj_request_submit(osdc, orig_request);
2638 /* Record the error code and complete the request */
2640 orig_request->result = img_result;
2641 orig_request->xferred = 0;
2642 obj_request_done_set(orig_request);
2643 rbd_obj_request_complete(orig_request);
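/*
 * Illustrative layout of the copyup request built above: the single osd
 * request carries three ops, applied by the OSD as one transaction:
 *
 *   op 0: "rbd" class method call "copyup", payload = parent data pages
 *   op 1: allocation hint sized to the object size
 *   op 2: the original CEPH_OSD_OP_WRITE extent
 *
 * The cls_rbd copyup method is expected to populate the object only if
 * it holds no data yet (a server-side behavior assumed here, not shown
 * in this file), so stale parent data cannot clobber a racing write.
 */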
2647 * Read from the parent image the range of data that covers the
2648 * entire target of the given object request. This is used for
2649 * satisfying a layered image write request when the target of an
2650 * object request from the image request does not exist.
2652 * A page array big enough to hold the returned data is allocated
2653 * and supplied to rbd_img_request_fill() as the "data descriptor."
2654 * When the read completes, this page array will be transferred to
2655 * the original object request for the copyup operation.
2657 * If an error occurs, record it as the result of the original
2658 * object request and mark it done so it gets completed.
2660 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2662 struct rbd_img_request *img_request = NULL;
2663 struct rbd_img_request *parent_request = NULL;
2664 struct rbd_device *rbd_dev;
2667 struct page **pages = NULL;
2671 rbd_assert(obj_request_img_data_test(obj_request));
2672 rbd_assert(obj_request_type_valid(obj_request->type));
2674 img_request = obj_request->img_request;
2675 rbd_assert(img_request != NULL);
2676 rbd_dev = img_request->rbd_dev;
2677 rbd_assert(rbd_dev->parent != NULL);
2680 * Determine the byte range covered by the object in the
2681 * child image to which the original request was to be sent.
2683 img_offset = obj_request->img_offset - obj_request->offset;
2684 length = (u64)1 << rbd_dev->header.obj_order;
2687 * There is no defined parent data beyond the parent
2688 * overlap, so limit what we read at that boundary if
2689 * necessary.
2691 if (img_offset + length > rbd_dev->parent_overlap) {
2692 rbd_assert(img_offset < rbd_dev->parent_overlap);
2693 length = rbd_dev->parent_overlap - img_offset;
2697 * Allocate a page array big enough to receive the data read
2698 * from the parent.
2700 page_count = (u32)calc_pages_for(0, length);
2701 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2702 if (IS_ERR(pages)) {
2703 result = PTR_ERR(pages);
2709 parent_request = rbd_parent_request_create(obj_request,
2710 img_offset, length);
2711 if (!parent_request)
2714 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2717 parent_request->copyup_pages = pages;
2718 parent_request->copyup_page_count = page_count;
2720 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2721 result = rbd_img_request_submit(parent_request);
2725 parent_request->copyup_pages = NULL;
2726 parent_request->copyup_page_count = 0;
2727 parent_request->obj_request = NULL;
2728 rbd_obj_request_put(obj_request);
2731 ceph_release_page_vector(pages, page_count);
2733 rbd_img_request_put(parent_request);
2734 obj_request->result = result;
2735 obj_request->xferred = 0;
2736 obj_request_done_set(obj_request);
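/*
 * Overlap-clamp example (illustrative, assuming 4 MB objects and a
 * parent_overlap of 6 MB): for a target object covering image bytes
 * 4M..8M, img_offset is 4M and the full-object length of 4M would run
 * past the overlap, so length is clamped to 6M - 4M = 2M. Only that
 * 2 MB prefix is read from the parent; the rest of the object has no
 * defined parent data.
 */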
2741 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2743 struct rbd_obj_request *orig_request;
2744 struct rbd_device *rbd_dev;
2747 rbd_assert(!obj_request_img_data_test(obj_request));
2750 * All we need from the object request is the original
2751 * request and the result of the STAT op. Grab those, then
2752 * we're done with the request.
2754 orig_request = obj_request->obj_request;
2755 obj_request->obj_request = NULL;
2756 rbd_obj_request_put(orig_request);
2757 rbd_assert(orig_request);
2758 rbd_assert(orig_request->img_request);
2760 result = obj_request->result;
2761 obj_request->result = 0;
2763 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2764 obj_request, orig_request, result,
2765 obj_request->xferred, obj_request->length);
2766 rbd_obj_request_put(obj_request);
2769 * If the overlap has become 0 (most likely because the
2770 * image has been flattened) we need to free the pages
2771 * and re-submit the original write request.
2773 rbd_dev = orig_request->img_request->rbd_dev;
2774 if (!rbd_dev->parent_overlap) {
2775 struct ceph_osd_client *osdc;
2777 osdc = &rbd_dev->rbd_client->client->osdc;
2778 result = rbd_obj_request_submit(osdc, orig_request);
2784 * Our only purpose here is to determine whether the object
2785 * exists, and we don't want to treat the non-existence as
2786 * an error. If something else comes back, transfer the
2787 * error to the original request and complete it now.
2790 obj_request_existence_set(orig_request, true);
2791 } else if (result == -ENOENT) {
2792 obj_request_existence_set(orig_request, false);
2793 } else if (result) {
2794 orig_request->result = result;
2799 * Resubmit the original request now that we have recorded
2800 * whether the target object exists.
2802 orig_request->result = rbd_img_obj_request_submit(orig_request);
2804 if (orig_request->result)
2805 rbd_obj_request_complete(orig_request);
2808 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2810 struct rbd_obj_request *stat_request;
2811 struct rbd_device *rbd_dev;
2812 struct ceph_osd_client *osdc;
2813 struct page **pages = NULL;
2819 * The response data for a STAT call consists of:
2820 *     le64 length;
2821 *     struct {
2822 *         le32 tv_sec;
2823 *         le32 tv_nsec;
2824 *     } mtime;
2826 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2827 page_count = (u32)calc_pages_for(0, size);
2828 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2830 return PTR_ERR(pages);
2833 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2838 rbd_obj_request_get(obj_request);
2839 stat_request->obj_request = obj_request;
2840 stat_request->pages = pages;
2841 stat_request->page_count = page_count;
2843 rbd_assert(obj_request->img_request);
2844 rbd_dev = obj_request->img_request->rbd_dev;
2845 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2847 if (!stat_request->osd_req)
2849 stat_request->callback = rbd_img_obj_exists_callback;
2851 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2852 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2854 rbd_osd_req_format_read(stat_request);
2856 osdc = &rbd_dev->rbd_client->client->osdc;
2857 ret = rbd_obj_request_submit(osdc, stat_request);
2860 rbd_obj_request_put(obj_request);
2865 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2867 struct rbd_img_request *img_request;
2868 struct rbd_device *rbd_dev;
2870 rbd_assert(obj_request_img_data_test(obj_request));
2872 img_request = obj_request->img_request;
2873 rbd_assert(img_request);
2874 rbd_dev = img_request->rbd_dev;
2877 if (!img_request_write_test(img_request) &&
2878 !img_request_discard_test(img_request))
2881 /* Non-layered writes */
2882 if (!img_request_layered_test(img_request))
2886 * Layered writes outside of the parent overlap range don't
2887 * share any data with the parent.
2889 if (!obj_request_overlaps_parent(obj_request))
2893 * Entire-object layered writes - we will overwrite whatever
2894 * parent data there is anyway.
2896 if (!obj_request->offset &&
2897 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2901 * If the object is known to already exist, its parent data has
2902 * already been copied.
2904 if (obj_request_known_test(obj_request) &&
2905 obj_request_exists_test(obj_request))
2911 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2913 if (img_obj_request_simple(obj_request)) {
2914 struct rbd_device *rbd_dev;
2915 struct ceph_osd_client *osdc;
2917 rbd_dev = obj_request->img_request->rbd_dev;
2918 osdc = &rbd_dev->rbd_client->client->osdc;
2920 return rbd_obj_request_submit(osdc, obj_request);
2924 * It's a layered write. The target object might exist but
2925 * we may not know that yet. If we know it doesn't exist,
2926 * start by reading the data for the full target object from
2927 * the parent so we can use it for a copyup to the target.
2929 if (obj_request_known_test(obj_request))
2930 return rbd_img_obj_parent_read_full(obj_request);
2932 /* We don't know whether the target exists. Go find out. */
2934 return rbd_img_obj_exists_submit(obj_request);
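/*
 * Submission decision sketch (editorial summary of the logic above):
 *
 *   simple (read; non-layered write; write beyond the parent overlap;
 *   whole-object write; object known to exist)
 *                                   -> submit the osd request directly
 *   layered write, existence known (and known not to exist)
 *                                   -> rbd_img_obj_parent_read_full()
 *   layered write, existence unknown
 *                                   -> rbd_img_obj_exists_submit() (STAT)
 */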
2937 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2939 struct rbd_obj_request *obj_request;
2940 struct rbd_obj_request *next_obj_request;
2942 dout("%s: img %p\n", __func__, img_request);
2943 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2946 ret = rbd_img_obj_request_submit(obj_request);
2954 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2956 struct rbd_obj_request *obj_request;
2957 struct rbd_device *rbd_dev;
2962 rbd_assert(img_request_child_test(img_request));
2964 /* First get what we need from the image request and release it */
2966 obj_request = img_request->obj_request;
2967 img_xferred = img_request->xferred;
2968 img_result = img_request->result;
2969 rbd_img_request_put(img_request);
2972 * If the overlap has become 0 (most likely because the
2973 * image has been flattened) we need to re-submit the
2974 * original request.
2976 rbd_assert(obj_request);
2977 rbd_assert(obj_request->img_request);
2978 rbd_dev = obj_request->img_request->rbd_dev;
2979 if (!rbd_dev->parent_overlap) {
2980 struct ceph_osd_client *osdc;
2982 osdc = &rbd_dev->rbd_client->client->osdc;
2983 img_result = rbd_obj_request_submit(osdc, obj_request);
2988 obj_request->result = img_result;
2989 if (obj_request->result)
2993 * We need to zero anything beyond the parent overlap
2994 * boundary. Since rbd_img_obj_request_read_callback()
2995 * will zero anything beyond the end of a short read, an
2996 * easy way to do this is to pretend the data from the
2997 * parent came up short--ending at the overlap boundary.
2999 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3000 obj_end = obj_request->img_offset + obj_request->length;
3001 if (obj_end > rbd_dev->parent_overlap) {
3004 if (obj_request->img_offset < rbd_dev->parent_overlap)
3005 xferred = rbd_dev->parent_overlap -
3006 obj_request->img_offset;
3008 obj_request->xferred = min(img_xferred, xferred);
3010 obj_request->xferred = img_xferred;
3013 rbd_img_obj_request_read_callback(obj_request);
3014 rbd_obj_request_complete(obj_request);
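/*
 * Zeroing example (illustrative, assuming 4 MB objects and a 6 MB
 * parent_overlap): a parent read for an object covering image bytes
 * 4M..8M has obj_end = 8M > 6M, so xferred is capped at 6M - 4M = 2M.
 * rbd_img_obj_request_read_callback() then treats the result as a 2 MB
 * short read and zero-fills the remaining 2 MB of the request.
 */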
3017 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3019 struct rbd_img_request *img_request;
3022 rbd_assert(obj_request_img_data_test(obj_request));
3023 rbd_assert(obj_request->img_request != NULL);
3024 rbd_assert(obj_request->result == (s32) -ENOENT);
3025 rbd_assert(obj_request_type_valid(obj_request->type));
3027 /* rbd_read_finish(obj_request, obj_request->length); */
3028 img_request = rbd_parent_request_create(obj_request,
3029 obj_request->img_offset,
3030 obj_request->length);
3035 if (obj_request->type == OBJ_REQUEST_BIO)
3036 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3037 obj_request->bio_list);
3039 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3040 obj_request->pages);
3044 img_request->callback = rbd_img_parent_read_callback;
3045 result = rbd_img_request_submit(img_request);
3052 rbd_img_request_put(img_request);
3053 obj_request->result = result;
3054 obj_request->xferred = 0;
3055 obj_request_done_set(obj_request);
3058 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3060 struct rbd_obj_request *obj_request;
3061 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3064 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3065 OBJ_REQUEST_NODATA);
3070 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3072 if (!obj_request->osd_req)
3075 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3077 rbd_osd_req_format_read(obj_request);
3079 ret = rbd_obj_request_submit(osdc, obj_request);
3082 ret = rbd_obj_request_wait(obj_request);
3084 rbd_obj_request_put(obj_request);
3089 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3091 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3097 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3098 rbd_dev->header_name, (unsigned long long)notify_id,
3099 (unsigned int)opcode);
3102 * Until adequate refresh error handling is in place, there is
3103 * not much we can do here, except warn.
3105 * See http://tracker.ceph.com/issues/5040
3107 ret = rbd_dev_refresh(rbd_dev);
3109 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3111 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3113 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3117 * Send a (un)watch request and wait for the ack. On success, return
3118 * the request with a ref held; on failure, return an ERR_PTR.
3120 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3121 struct rbd_device *rbd_dev,
3124 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3125 struct rbd_obj_request *obj_request;
3128 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3129 OBJ_REQUEST_NODATA);
3131 return ERR_PTR(-ENOMEM);
3133 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3135 if (!obj_request->osd_req) {
3140 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3141 rbd_dev->watch_event->cookie, 0, watch);
3142 rbd_osd_req_format_write(obj_request);
3145 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3147 ret = rbd_obj_request_submit(osdc, obj_request);
3151 ret = rbd_obj_request_wait(obj_request);
3155 ret = obj_request->result;
3158 rbd_obj_request_end(obj_request);
3165 rbd_obj_request_put(obj_request);
3166 return ERR_PTR(ret);
3170 * Initiate a watch request, synchronously.
3172 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3174 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3175 struct rbd_obj_request *obj_request;
3178 rbd_assert(!rbd_dev->watch_event);
3179 rbd_assert(!rbd_dev->watch_request);
3181 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3182 &rbd_dev->watch_event);
3186 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3187 if (IS_ERR(obj_request)) {
3188 ceph_osdc_cancel_event(rbd_dev->watch_event);
3189 rbd_dev->watch_event = NULL;
3190 return PTR_ERR(obj_request);
3194 * A watch request is set to linger, so the underlying osd
3195 * request won't go away until we unregister it. We retain
3196 * a pointer to the object request during that time (in
3197 * rbd_dev->watch_request), so we'll keep a reference to it.
3198 * We'll drop that reference after we've unregistered it in
3199 * rbd_dev_header_unwatch_sync().
3201 rbd_dev->watch_request = obj_request;
3207 * Tear down a watch request, synchronously.
3209 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3211 struct rbd_obj_request *obj_request;
3213 rbd_assert(rbd_dev->watch_event);
3214 rbd_assert(rbd_dev->watch_request);
3216 rbd_obj_request_end(rbd_dev->watch_request);
3217 rbd_obj_request_put(rbd_dev->watch_request);
3218 rbd_dev->watch_request = NULL;
3220 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3221 if (!IS_ERR(obj_request))
3222 rbd_obj_request_put(obj_request);
3224 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3225 PTR_ERR(obj_request));
3227 ceph_osdc_cancel_event(rbd_dev->watch_event);
3228 rbd_dev->watch_event = NULL;
3232 * Synchronous osd object method call. Returns the number of bytes
3233 * returned in the inbound buffer, or a negative error code.
3235 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3236 const char *object_name,
3237 const char *class_name,
3238 const char *method_name,
3239 const void *outbound,
3240 size_t outbound_size,
3242 size_t inbound_size)
3244 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3245 struct rbd_obj_request *obj_request;
3246 struct page **pages;
3251 * Method calls are ultimately read operations. The result
3252 * should be placed into the inbound buffer provided. They
3253 * also supply outbound data--parameters for the object
3254 * method. Currently if this is present it will be a
3255 * snapshot id.
3257 page_count = (u32)calc_pages_for(0, inbound_size);
3258 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3260 return PTR_ERR(pages);
3263 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3268 obj_request->pages = pages;
3269 obj_request->page_count = page_count;
3271 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3273 if (!obj_request->osd_req)
3276 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3277 class_name, method_name);
3278 if (outbound_size) {
3279 struct ceph_pagelist *pagelist;
3281 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3285 ceph_pagelist_init(pagelist);
3286 ceph_pagelist_append(pagelist, outbound, outbound_size);
3287 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3290 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3291 obj_request->pages, inbound_size,
3293 rbd_osd_req_format_read(obj_request);
3295 ret = rbd_obj_request_submit(osdc, obj_request);
3298 ret = rbd_obj_request_wait(obj_request);
3302 ret = obj_request->result;
3306 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3307 ret = (int)obj_request->xferred;
3308 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3311 rbd_obj_request_put(obj_request);
3313 ceph_release_page_vector(pages, page_count);
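/*
 * Usage sketch: most v2 metadata helpers below reduce to a single call
 * of this function, e.g. (taken from rbd_dev_v2_object_prefix() later
 * in this file):
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_object_prefix", NULL, 0,
 *				  reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
 *
 * A non-negative return is the number of reply bytes to decode.
 */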
3318 static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
3320 struct rbd_img_request *img_request;
3321 struct ceph_snap_context *snapc = NULL;
3322 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3323 u64 length = blk_rq_bytes(rq);
3324 enum obj_operation_type op_type;
3328 if (rq->cmd_flags & REQ_DISCARD)
3329 op_type = OBJ_OP_DISCARD;
3330 else if (rq->cmd_flags & REQ_WRITE)
3331 op_type = OBJ_OP_WRITE;
3333 op_type = OBJ_OP_READ;
3335 /* Ignore/skip any zero-length requests */
3338 dout("%s: zero-length request\n", __func__);
3343 /* Only reads are allowed to a read-only device */
3345 if (op_type != OBJ_OP_READ) {
3346 if (rbd_dev->mapping.read_only) {
3350 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3354 * Quit early if the mapped snapshot no longer exists. It's
3355 * still possible the snapshot will have disappeared by the
3356 * time our request arrives at the osd, but there's no sense in
3357 * sending it if we already know.
3359 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3360 dout("request for non-existent snapshot");
3361 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3366 if (offset && length > U64_MAX - offset + 1) {
3367 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3370 goto err_rq; /* Shouldn't happen */
3373 down_read(&rbd_dev->header_rwsem);
3374 mapping_size = rbd_dev->mapping.size;
3375 if (op_type != OBJ_OP_READ) {
3376 snapc = rbd_dev->header.snapc;
3377 ceph_get_snap_context(snapc);
3379 up_read(&rbd_dev->header_rwsem);
3381 if (offset + length > mapping_size) {
3382 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3383 length, mapping_size);
3388 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3394 img_request->rq = rq;
3396 if (op_type == OBJ_OP_DISCARD)
3397 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3400 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3403 goto err_img_request;
3405 result = rbd_img_request_submit(img_request);
3407 goto err_img_request;
3412 rbd_img_request_put(img_request);
3415 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3416 obj_op_name(op_type), length, offset, result);
3418 ceph_put_snap_context(snapc);
3419 blk_end_request_all(rq, result);
3422 static void rbd_request_workfn(struct work_struct *work)
3424 struct rbd_device *rbd_dev =
3425 container_of(work, struct rbd_device, rq_work);
3426 struct request *rq, *next;
3427 LIST_HEAD(requests);
3429 spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
3430 list_splice_init(&rbd_dev->rq_queue, &requests);
3431 spin_unlock_irq(&rbd_dev->lock);
3433 list_for_each_entry_safe(rq, next, &requests, queuelist) {
3434 list_del_init(&rq->queuelist);
3435 rbd_handle_request(rbd_dev, rq);
3440 * Called with q->queue_lock held and interrupts disabled, possibly on
3441 * the way to schedule(). Do not sleep here!
3443 static void rbd_request_fn(struct request_queue *q)
3445 struct rbd_device *rbd_dev = q->queuedata;
3449 rbd_assert(rbd_dev);
3451 while ((rq = blk_fetch_request(q))) {
3452 /* Ignore any non-FS requests that filter through. */
3453 if (rq->cmd_type != REQ_TYPE_FS) {
3454 dout("%s: non-fs request type %d\n", __func__,
3455 (int) rq->cmd_type);
3456 __blk_end_request_all(rq, 0);
3460 list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
3465 queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
3469 * A queue merge_bvec callback. Makes sure that we don't create a bio
3470 * that spans multiple osd objects. One exception is a single-page bio,
3471 * which we handle later in bio_chain_clone_range().
3473 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3474 struct bio_vec *bvec)
3476 struct rbd_device *rbd_dev = q->queuedata;
3477 sector_t sector_offset;
3478 sector_t sectors_per_obj;
3479 sector_t obj_sector_offset;
3483 * Find how far into its rbd object the partition-relative
3484 * bio start sector falls, after offsetting it relative to the
3485 * enclosing device.
3487 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3488 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3489 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3492 * Compute the number of bytes from that offset to the end
3493 * of the object. Account for what's already used by the bio.
3495 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3496 if (ret > bmd->bi_size)
3497 ret -= bmd->bi_size;
3502 * Don't send back more than was asked for. And if the bio
3503 * was empty, let the whole thing through because: "Note
3504 * that a block device *must* allow a single page to be
3505 * added to an empty bio."
3507 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3508 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3509 ret = (int) bvec->bv_len;
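/*
 * Worked example (illustrative, obj_order 22 => 4 MB objects):
 * sectors_per_obj = 1 << (22 - 9) = 8192. For a bio starting at
 * object-relative sector 8000 that already holds 64 KiB, the room left
 * in the object is (8192 - 8000) * 512 = 98304 bytes, so at most
 * 98304 - 65536 = 32768 more bytes may be merged (further capped at
 * bvec->bv_len).
 */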
3514 static void rbd_free_disk(struct rbd_device *rbd_dev)
3516 struct gendisk *disk = rbd_dev->disk;
3521 rbd_dev->disk = NULL;
3522 if (disk->flags & GENHD_FL_UP) {
3525 blk_cleanup_queue(disk->queue);
3530 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3531 const char *object_name,
3532 u64 offset, u64 length, void *buf)
3535 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3536 struct rbd_obj_request *obj_request;
3537 struct page **pages = NULL;
3542 page_count = (u32) calc_pages_for(offset, length);
3543 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3545 ret = PTR_ERR(pages);
3548 obj_request = rbd_obj_request_create(object_name, offset, length,
3553 obj_request->pages = pages;
3554 obj_request->page_count = page_count;
3556 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3558 if (!obj_request->osd_req)
3561 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3562 offset, length, 0, 0);
3563 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3565 obj_request->length,
3566 obj_request->offset & ~PAGE_MASK,
3568 rbd_osd_req_format_read(obj_request);
3570 ret = rbd_obj_request_submit(osdc, obj_request);
3573 ret = rbd_obj_request_wait(obj_request);
3577 ret = obj_request->result;
3581 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3582 size = (size_t) obj_request->xferred;
3583 ceph_copy_from_page_vector(pages, buf, 0, size);
3584 rbd_assert(size <= (size_t)INT_MAX);
3588 rbd_obj_request_put(obj_request);
3590 ceph_release_page_vector(pages, page_count);
3596 * Read the complete header for the given rbd device. On successful
3597 * return, the rbd_dev->header field will contain up-to-date
3598 * information about the image.
3600 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3602 struct rbd_image_header_ondisk *ondisk = NULL;
3609 * The complete header will include an array of its 64-bit
3610 * snapshot ids, followed by the names of those snapshots as
3611 * a contiguous block of NUL-terminated strings. Note that
3612 * the number of snapshots could change by the time we read
3613 * it in, in which case we re-read it.
3620 size = sizeof (*ondisk);
3621 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3623 ondisk = kmalloc(size, GFP_KERNEL);
3627 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3631 if ((size_t)ret < size) {
3633 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3637 if (!rbd_dev_ondisk_valid(ondisk)) {
3639 rbd_warn(rbd_dev, "invalid header");
3643 names_size = le64_to_cpu(ondisk->snap_names_len);
3644 want_count = snap_count;
3645 snap_count = le32_to_cpu(ondisk->snap_count);
3646 } while (snap_count != want_count);
3648 ret = rbd_header_from_disk(rbd_dev, ondisk);
3656 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3657 * has disappeared from the (just updated) snapshot context.
3659 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3663 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3666 snap_id = rbd_dev->spec->snap_id;
3667 if (snap_id == CEPH_NOSNAP)
3670 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3671 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3674 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3680 * Don't hold the lock while doing disk operations,
3681 * or lock ordering will conflict with the bdev mutex via:
3682 * rbd_add() -> blkdev_get() -> rbd_open()
3684 spin_lock_irq(&rbd_dev->lock);
3685 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3686 spin_unlock_irq(&rbd_dev->lock);
3688 * If the device is being removed, rbd_dev->disk has
3689 * been destroyed, so don't try to update its size
3692 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3693 dout("setting size to %llu sectors", (unsigned long long)size);
3694 set_capacity(rbd_dev->disk, size);
3695 revalidate_disk(rbd_dev->disk);
3699 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3704 down_write(&rbd_dev->header_rwsem);
3705 mapping_size = rbd_dev->mapping.size;
3707 ret = rbd_dev_header_info(rbd_dev);
3712 * If there is a parent, see if it has disappeared due to the
3713 * mapped image getting flattened.
3715 if (rbd_dev->parent) {
3716 ret = rbd_dev_v2_parent_info(rbd_dev);
3721 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3722 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
3723 rbd_dev->mapping.size = rbd_dev->header.image_size;
3725 /* validate mapped snapshot's EXISTS flag */
3726 rbd_exists_validate(rbd_dev);
3729 up_write(&rbd_dev->header_rwsem);
3731 if (mapping_size != rbd_dev->mapping.size)
3732 rbd_dev_update_size(rbd_dev);
3737 static int rbd_init_disk(struct rbd_device *rbd_dev)
3739 struct gendisk *disk;
3740 struct request_queue *q;
3743 /* create gendisk info */
3744 disk = alloc_disk(single_major ?
3745 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3746 RBD_MINORS_PER_MAJOR);
3750 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3752 disk->major = rbd_dev->major;
3753 disk->first_minor = rbd_dev->minor;
3755 disk->flags |= GENHD_FL_EXT_DEVT;
3756 disk->fops = &rbd_bd_ops;
3757 disk->private_data = rbd_dev;
3759 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3763 /* We use the default size, but let's be explicit about it. */
3764 blk_queue_physical_block_size(q, SECTOR_SIZE);
3766 /* set io sizes to object size */
3767 segment_size = rbd_obj_bytes(&rbd_dev->header);
3768 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3769 blk_queue_max_segment_size(q, segment_size);
3770 blk_queue_io_min(q, segment_size);
3771 blk_queue_io_opt(q, segment_size);
3773 /* enable the discard support */
3774 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3775 q->limits.discard_granularity = segment_size;
3776 q->limits.discard_alignment = segment_size;
3778 blk_queue_merge_bvec(q, rbd_merge_bvec);
3781 q->queuedata = rbd_dev;
3783 rbd_dev->disk = disk;
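/*
 * Illustrative numbers (not from the original source): with the default
 * 4 MB object size, segment_size = 4194304, so the queue advertises
 * max_hw_sectors = 8192, a 4 MB maximum segment size, and 4 MB discard
 * granularity/alignment -- every I/O limit lines up with rbd objects.
 */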
3796 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3798 return container_of(dev, struct rbd_device, dev);
3801 static ssize_t rbd_size_show(struct device *dev,
3802 struct device_attribute *attr, char *buf)
3804 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3806 return sprintf(buf, "%llu\n",
3807 (unsigned long long)rbd_dev->mapping.size);
3811 * Note this shows the features for whatever's mapped, which is not
3812 * necessarily the base image.
3814 static ssize_t rbd_features_show(struct device *dev,
3815 struct device_attribute *attr, char *buf)
3817 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3819 return sprintf(buf, "0x%016llx\n",
3820 (unsigned long long)rbd_dev->mapping.features);
3823 static ssize_t rbd_major_show(struct device *dev,
3824 struct device_attribute *attr, char *buf)
3826 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3829 return sprintf(buf, "%d\n", rbd_dev->major);
3831 return sprintf(buf, "(none)\n");
3834 static ssize_t rbd_minor_show(struct device *dev,
3835 struct device_attribute *attr, char *buf)
3837 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3839 return sprintf(buf, "%d\n", rbd_dev->minor);
3842 static ssize_t rbd_client_id_show(struct device *dev,
3843 struct device_attribute *attr, char *buf)
3845 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3847 return sprintf(buf, "client%lld\n",
3848 ceph_client_id(rbd_dev->rbd_client->client));
3851 static ssize_t rbd_pool_show(struct device *dev,
3852 struct device_attribute *attr, char *buf)
3854 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3856 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3859 static ssize_t rbd_pool_id_show(struct device *dev,
3860 struct device_attribute *attr, char *buf)
3862 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3864 return sprintf(buf, "%llu\n",
3865 (unsigned long long) rbd_dev->spec->pool_id);
3868 static ssize_t rbd_name_show(struct device *dev,
3869 struct device_attribute *attr, char *buf)
3871 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3873 if (rbd_dev->spec->image_name)
3874 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3876 return sprintf(buf, "(unknown)\n");
3879 static ssize_t rbd_image_id_show(struct device *dev,
3880 struct device_attribute *attr, char *buf)
3882 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3884 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3888 * Shows the name of the currently-mapped snapshot (or
3889 * RBD_SNAP_HEAD_NAME for the base image).
3891 static ssize_t rbd_snap_show(struct device *dev,
3892 struct device_attribute *attr,
3895 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3897 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3901 * For a v2 image, shows the chain of parent images, separated by empty
3902 * lines. For v1 images or if there is no parent, shows "(no parent
3903 * image)".
3905 static ssize_t rbd_parent_show(struct device *dev,
3906 struct device_attribute *attr,
3909 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3912 if (!rbd_dev->parent)
3913 return sprintf(buf, "(no parent image)\n");
3915 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3916 struct rbd_spec *spec = rbd_dev->parent_spec;
3918 count += sprintf(&buf[count], "%s"
3919 "pool_id %llu\npool_name %s\n"
3920 "image_id %s\nimage_name %s\n"
3921 "snap_id %llu\nsnap_name %s\n"
3923 !count ? "" : "\n", /* first? */
3924 spec->pool_id, spec->pool_name,
3925 spec->image_id, spec->image_name ?: "(unknown)",
3926 spec->snap_id, spec->snap_name,
3927 rbd_dev->parent_overlap);
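/*
 * Example sysfs output for a one-level clone (values are illustrative):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 10286b8b4567
 *	image_name parent-image
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 */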
3933 static ssize_t rbd_image_refresh(struct device *dev,
3934 struct device_attribute *attr,
3938 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3941 ret = rbd_dev_refresh(rbd_dev);
3948 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3949 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3950 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3951 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3952 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3953 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3954 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3955 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3956 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3957 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3958 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3959 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3961 static struct attribute *rbd_attrs[] = {
3962 &dev_attr_size.attr,
3963 &dev_attr_features.attr,
3964 &dev_attr_major.attr,
3965 &dev_attr_minor.attr,
3966 &dev_attr_client_id.attr,
3967 &dev_attr_pool.attr,
3968 &dev_attr_pool_id.attr,
3969 &dev_attr_name.attr,
3970 &dev_attr_image_id.attr,
3971 &dev_attr_current_snap.attr,
3972 &dev_attr_parent.attr,
3973 &dev_attr_refresh.attr,
3977 static struct attribute_group rbd_attr_group = {
3981 static const struct attribute_group *rbd_attr_groups[] = {
3986 static void rbd_sysfs_dev_release(struct device *dev)
3990 static struct device_type rbd_device_type = {
3992 .groups = rbd_attr_groups,
3993 .release = rbd_sysfs_dev_release,
3996 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3998 kref_get(&spec->kref);
4003 static void rbd_spec_free(struct kref *kref);
4004 static void rbd_spec_put(struct rbd_spec *spec)
4007 kref_put(&spec->kref, rbd_spec_free);
4010 static struct rbd_spec *rbd_spec_alloc(void)
4012 struct rbd_spec *spec;
4014 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4018 spec->pool_id = CEPH_NOPOOL;
4019 spec->snap_id = CEPH_NOSNAP;
4020 kref_init(&spec->kref);
4025 static void rbd_spec_free(struct kref *kref)
4027 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4029 kfree(spec->pool_name);
4030 kfree(spec->image_id);
4031 kfree(spec->image_name);
4032 kfree(spec->snap_name);
4036 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4037 struct rbd_spec *spec)
4039 struct rbd_device *rbd_dev;
4041 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4045 spin_lock_init(&rbd_dev->lock);
4046 INIT_LIST_HEAD(&rbd_dev->rq_queue);
4047 INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
4049 atomic_set(&rbd_dev->parent_ref, 0);
4050 INIT_LIST_HEAD(&rbd_dev->node);
4051 init_rwsem(&rbd_dev->header_rwsem);
4053 rbd_dev->spec = spec;
4054 rbd_dev->rbd_client = rbdc;
4056 /* Initialize the layout used for all rbd requests */
4058 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4059 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4060 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4061 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4066 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4068 rbd_put_client(rbd_dev->rbd_client);
4069 rbd_spec_put(rbd_dev->spec);
4074 * Get the size and object order for an image snapshot, or if
4075 * snap_id is CEPH_NOSNAP, gets this information for the base
4078 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4079 u8 *order, u64 *snap_size)
4081 __le64 snapid = cpu_to_le64(snap_id);
4086 } __attribute__ ((packed)) size_buf = { 0 };
4088 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4090 &snapid, sizeof (snapid),
4091 &size_buf, sizeof (size_buf));
4092 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4095 if (ret < sizeof (size_buf))
4099 *order = size_buf.order;
4100 dout(" order %u", (unsigned int)*order);
4102 *snap_size = le64_to_cpu(size_buf.size);
4104 dout(" snap_id 0x%016llx snap_size = %llu\n",
4105 (unsigned long long)snap_id,
4106 (unsigned long long)*snap_size);
4111 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4113 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4114 &rbd_dev->header.obj_order,
4115 &rbd_dev->header.image_size);
4118 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4124 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4128 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4129 "rbd", "get_object_prefix", NULL, 0,
4130 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4131 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4136 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4137 p + ret, NULL, GFP_NOIO);
4140 if (IS_ERR(rbd_dev->header.object_prefix)) {
4141 ret = PTR_ERR(rbd_dev->header.object_prefix);
4142 rbd_dev->header.object_prefix = NULL;
4144 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4152 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4155 __le64 snapid = cpu_to_le64(snap_id);
4159 } __attribute__ ((packed)) features_buf = { 0 };
4163 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4164 "rbd", "get_features",
4165 &snapid, sizeof (snapid),
4166 &features_buf, sizeof (features_buf));
4167 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4170 if (ret < sizeof (features_buf))
4173 incompat = le64_to_cpu(features_buf.incompat);
4174 if (incompat & ~RBD_FEATURES_SUPPORTED)
4177 *snap_features = le64_to_cpu(features_buf.features);
4179 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4180 (unsigned long long)snap_id,
4181 (unsigned long long)*snap_features,
4182 (unsigned long long)le64_to_cpu(features_buf.incompat));
4187 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4189 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4190 &rbd_dev->header.features);
4193 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4195 struct rbd_spec *parent_spec;
4197 void *reply_buf = NULL;
4207 parent_spec = rbd_spec_alloc();
4211 size = sizeof (__le64) + /* pool_id */
4212 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4213 sizeof (__le64) + /* snap_id */
4214 sizeof (__le64); /* overlap */
4215 reply_buf = kmalloc(size, GFP_KERNEL);
4221 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4222 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4223 "rbd", "get_parent",
4224 &snapid, sizeof (snapid),
4226 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4231 end = reply_buf + ret;
4233 ceph_decode_64_safe(&p, end, pool_id, out_err);
4234 if (pool_id == CEPH_NOPOOL) {
4236 * Either the parent never existed, or we have
4237 * record of it but the image got flattened so it no
4238 * longer has a parent. When the parent of a
4239 * layered image disappears we immediately set the
4240 * overlap to 0. The effect of this is that all new
4241 * requests will be treated as if the image had no
4242 * parent.
4244 if (rbd_dev->parent_overlap) {
4245 rbd_dev->parent_overlap = 0;
4247 rbd_dev_parent_put(rbd_dev);
4248 pr_info("%s: clone image has been flattened\n",
4249 rbd_dev->disk->disk_name);
4252 goto out; /* No parent? No problem. */
4255 /* The ceph file layout needs to fit pool id in 32 bits */
4258 if (pool_id > (u64)U32_MAX) {
4259 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4260 (unsigned long long)pool_id, U32_MAX);
4264 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4265 if (IS_ERR(image_id)) {
4266 ret = PTR_ERR(image_id);
4269 ceph_decode_64_safe(&p, end, snap_id, out_err);
4270 ceph_decode_64_safe(&p, end, overlap, out_err);
4273 * The parent won't change (except when the clone is
4274 * flattened, which we've already handled). So we only need to
4275 * record the parent spec if we have not already done so.
4277 if (!rbd_dev->parent_spec) {
4278 parent_spec->pool_id = pool_id;
4279 parent_spec->image_id = image_id;
4280 parent_spec->snap_id = snap_id;
4281 rbd_dev->parent_spec = parent_spec;
4282 parent_spec = NULL; /* rbd_dev now owns this */
4288 * We always update the parent overlap. If it's zero we
4289 * treat it specially.
4291 rbd_dev->parent_overlap = overlap;
4295 /* A null parent_spec indicates it's the initial probe */
4299 * The overlap has become zero, so the clone
4300 * must have been resized down to 0 at some
4301 * point. Treat this the same as a flatten.
4303 rbd_dev_parent_put(rbd_dev);
4304 pr_info("%s: clone image now standalone\n",
4305 rbd_dev->disk->disk_name);
4308 * For the initial probe, if we find the
4309 * overlap is zero we just pretend there was
4310 * no parent image.
4312 rbd_warn(rbd_dev, "ignoring parent with overlap 0");
4319 rbd_spec_put(parent_spec);
4324 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4328 __le64 stripe_count;
4329 } __attribute__ ((packed)) striping_info_buf = { 0 };
4330 size_t size = sizeof (striping_info_buf);
4337 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4338 "rbd", "get_stripe_unit_count", NULL, 0,
4339 (char *)&striping_info_buf, size);
4340 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4347 * We don't actually support the "fancy striping" feature
4348 * (STRIPINGV2) yet, but if the striping sizes are the
4349 * defaults the behavior is the same as before. So find
4350 * out, and only fail if the image has non-default values.
4353 obj_size = (u64)1 << rbd_dev->header.obj_order;
4354 p = &striping_info_buf;
4355 stripe_unit = ceph_decode_64(&p);
4356 if (stripe_unit != obj_size) {
4357 rbd_warn(rbd_dev, "unsupported stripe unit (got %llu want %llu)",
4358 stripe_unit, obj_size);
4362 stripe_count = ceph_decode_64(&p);
4363 if (stripe_count != 1) {
4364 rbd_warn(rbd_dev, "unsupported stripe count (got %llu want 1)",
4365 stripe_count);
4368 rbd_dev->header.stripe_unit = stripe_unit;
4369 rbd_dev->header.stripe_count = stripe_count;
4374 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4376 size_t image_id_size;
4381 void *reply_buf = NULL;
4383 char *image_name = NULL;
4386 rbd_assert(!rbd_dev->spec->image_name);
4388 len = strlen(rbd_dev->spec->image_id);
4389 image_id_size = sizeof (__le32) + len;
4390 image_id = kmalloc(image_id_size, GFP_KERNEL);
4395 end = image_id + image_id_size;
4396 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4398 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4399 reply_buf = kmalloc(size, GFP_KERNEL);
4403 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4404 "rbd", "dir_get_name",
4405 image_id, image_id_size,
4410 end = reply_buf + ret;
4412 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4413 if (IS_ERR(image_name))
4416 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4424 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4426 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4427 const char *snap_name;
4430 /* Skip over names until we find the one we are looking for */
4432 snap_name = rbd_dev->header.snap_names;
4433 while (which < snapc->num_snaps) {
4434 if (!strcmp(name, snap_name))
4435 return snapc->snaps[which];
4436 snap_name += strlen(snap_name) + 1;
4442 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4444 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4449 for (which = 0; !found && which < snapc->num_snaps; which++) {
4450 const char *snap_name;
4452 snap_id = snapc->snaps[which];
4453 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4454 if (IS_ERR(snap_name)) {
4455 /* ignore no-longer existing snapshots */
4456 if (PTR_ERR(snap_name) == -ENOENT)
4461 found = !strcmp(name, snap_name);
4464 return found ? snap_id : CEPH_NOSNAP;
4468 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4469 * no snapshot by that name is found, or if an error occurs.
4471 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4473 if (rbd_dev->image_format == 1)
4474 return rbd_v1_snap_id_by_name(rbd_dev, name);
4476 return rbd_v2_snap_id_by_name(rbd_dev, name);
4480 * An image being mapped will have everything but the snap id.
4482 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4484 struct rbd_spec *spec = rbd_dev->spec;
4486 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4487 rbd_assert(spec->image_id && spec->image_name);
4488 rbd_assert(spec->snap_name);
4490 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4493 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4494 if (snap_id == CEPH_NOSNAP)
4497 spec->snap_id = snap_id;
4499 spec->snap_id = CEPH_NOSNAP;
4506 * A parent image will have all ids but none of the names.
4508 * All names in an rbd spec are dynamically allocated. It's OK if we
4509 * can't figure out the name for an image id.
4511 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4513 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4514 struct rbd_spec *spec = rbd_dev->spec;
4515 const char *pool_name;
4516 const char *image_name;
4517 const char *snap_name;
4520 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4521 rbd_assert(spec->image_id);
4522 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4524 /* Get the pool name; we have to make our own copy of this */
4526 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4528 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4531 pool_name = kstrdup(pool_name, GFP_KERNEL);
4535 /* Fetch the image name; tolerate failure here */
4537 image_name = rbd_dev_image_name(rbd_dev);
4539 rbd_warn(rbd_dev, "unable to get image name");
4541 /* Fetch the snapshot name */
4543 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4544 if (IS_ERR(snap_name)) {
4545 ret = PTR_ERR(snap_name);
4549 spec->pool_name = pool_name;
4550 spec->image_name = image_name;
4551 spec->snap_name = snap_name;
4561 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4570 struct ceph_snap_context *snapc;
4574 * We'll need room for the seq value (maximum snapshot id),
4575 * snapshot count, and array of that many snapshot ids.
4576 * For now we have a fixed upper limit on the number we're
4577 * prepared to receive.
4579 size = sizeof (__le64) + sizeof (__le32) +
4580 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4581 reply_buf = kzalloc(size, GFP_KERNEL);
4585 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4586 "rbd", "get_snapcontext", NULL, 0,
4588 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4593 end = reply_buf + ret;
4595 ceph_decode_64_safe(&p, end, seq, out);
4596 ceph_decode_32_safe(&p, end, snap_count, out);
4599 * Make sure the reported number of snapshot ids wouldn't go
4600 * beyond the end of our buffer. But before checking that,
4601 * make sure the computed size of the snapshot context we
4602 * allocate is representable in a size_t.
4604 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4609 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4613 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4614 if (!snapc) {
4615 ret = -ENOMEM;
4616 goto out;
4617 }
4618 snapc->seq = seq;
4619 for (i = 0; i < snap_count; i++)
4620 snapc->snaps[i] = ceph_decode_64(&p);
4622 ceph_put_snap_context(rbd_dev->header.snapc);
4623 rbd_dev->header.snapc = snapc;
4625 dout(" snap context seq = %llu, snap_count = %u\n",
4626 (unsigned long long)seq, (unsigned int)snap_count);
4627 out:
4628 kfree(reply_buf);
4630 return ret;
4631 }
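/*
 * Editor's sketch (not part of the driver): per the comment and the
 * decode calls above, the get_snapcontext reply is assumed to be laid
 * out as
 *
 *     __le64 seq;                maximum snapshot id
 *     __le32 snap_count;         number of snapshot ids that follow
 *     __le64 snaps[snap_count];  one id per existing snapshot
 *
 * which is why the fixed-size buffer allocated above can hold at most
 * RBD_MAX_SNAP_COUNT ids.
 */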
4633 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4634 u64 snap_id)
4635 {
4636 size_t size;
4637 void *reply_buf;
4638 __le64 snapid;
4639 int ret;
4640 void *p;
4641 void *end;
4642 char *snap_name;
4644 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4645 reply_buf = kmalloc(size, GFP_KERNEL);
4646 if (!reply_buf)
4647 return ERR_PTR(-ENOMEM);
4649 snapid = cpu_to_le64(snap_id);
4650 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4651 "rbd", "get_snapshot_name",
4652 &snapid, sizeof (snapid),
4653 reply_buf, size);
4654 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4655 if (ret < 0) {
4656 snap_name = ERR_PTR(ret);
4657 goto out;
4658 }
4660 p = reply_buf;
4661 end = reply_buf + ret;
4662 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4663 if (IS_ERR(snap_name))
4664 goto out;
4666 dout(" snap_id 0x%016llx snap_name = %s\n",
4667 (unsigned long long)snap_id, snap_name);
4668 out:
4669 kfree(reply_buf);
4671 return snap_name;
4672 }
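/*
 * Editor's note: the get_snapshot_name reply is a single encoded string
 * (a __le32 length followed by that many bytes), which is why it is
 * pulled out with ceph_extract_encoded_string() above and why the reply
 * buffer is sized sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN.
 */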
4674 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4675 {
4676 bool first_time = rbd_dev->header.object_prefix == NULL;
4677 int ret;
4679 ret = rbd_dev_v2_image_size(rbd_dev);
4680 if (ret)
4681 return ret;
4683 if (first_time) {
4684 ret = rbd_dev_v2_header_onetime(rbd_dev);
4685 if (ret)
4686 return ret;
4687 }
4689 ret = rbd_dev_v2_snap_context(rbd_dev);
4690 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4692 return ret;
4693 }
4695 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
4696 {
4697 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4699 if (rbd_dev->image_format == 1)
4700 return rbd_dev_v1_header_info(rbd_dev);
4702 return rbd_dev_v2_header_info(rbd_dev);
4703 }
4705 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4706 {
4707 struct device *dev;
4708 int ret;
4710 dev = &rbd_dev->dev;
4711 dev->bus = &rbd_bus_type;
4712 dev->type = &rbd_device_type;
4713 dev->parent = &rbd_root_dev;
4714 dev->release = rbd_dev_device_release;
4715 dev_set_name(dev, "%d", rbd_dev->dev_id);
4716 ret = device_register(dev);
4718 return ret;
4719 }
4721 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4722 {
4723 device_unregister(&rbd_dev->dev);
4724 }
4726 /*
4727  * Get a unique rbd identifier for the given new rbd_dev, and add
4728  * the rbd_dev to the global list.
4729  */
4730 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4731 {
4732 int new_dev_id;
4734 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4735 0, minor_to_rbd_dev_id(1 << MINORBITS),
4736 GFP_KERNEL);
4737 if (new_dev_id < 0)
4738 return new_dev_id;
4740 rbd_dev->dev_id = new_dev_id;
4742 spin_lock(&rbd_dev_list_lock);
4743 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4744 spin_unlock(&rbd_dev_list_lock);
4746 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4748 return 0;
4749 }
4751 /*
4752  * Remove an rbd_dev from the global list, and record that its
4753  * identifier is no longer in use.
4754  */
4755 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4756 {
4757 spin_lock(&rbd_dev_list_lock);
4758 list_del_init(&rbd_dev->node);
4759 spin_unlock(&rbd_dev_list_lock);
4761 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4763 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4764 }
4766 /*
4767  * Skips over white space at *buf, and updates *buf to point to the
4768 * first found non-space character (if any). Returns the length of
4769 * the token (string of non-white space characters) found. Note
4770  * that *buf must be terminated with '\0'.
4771  */
4772 static inline size_t next_token(const char **buf)
4773 {
4774 /*
4775  * These are the characters that produce nonzero for
4776  * isspace() in the "C" and "POSIX" locales.
4777  */
4778 const char *spaces = " \f\n\r\t\v";
4780 *buf += strspn(*buf, spaces); /* Find start of token */
4782 return strcspn(*buf, spaces); /* Return token length */
4783 }
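/*
 * Editor's example (not part of the driver): given buf pointing at
 * "  pool1 image1", next_token(&buf) advances buf to "pool1 image1"
 * and returns 5, the length of "pool1"; buf itself is not moved past
 * the token.
 */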
4785 /*
4786  * Finds the next token in *buf, and if the provided token buffer is
4787 * big enough, copies the found token into it. The result, if
4788 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4789 * must be terminated with '\0' on entry.
4791 * Returns the length of the token found (not including the '\0').
4792 * Return value will be 0 if no token is found, and it will be >=
4793 * token_size if the token would not fit.
4795 * The *buf pointer will be updated to point beyond the end of the
4796 * found token. Note that this occurs even if the token buffer is
4797  * too small to hold it.
4798  */
4799 static inline size_t copy_token(const char **buf,
4800 char *token,
4801 size_t token_size)
4802 {
4803 size_t len;
4805 len = next_token(buf);
4806 if (len < token_size) {
4807 memcpy(token, *buf, len);
4808 *(token + len) = '\0';
4809 }
4810 *buf += len;
4812 return len;
4813 }
4815 /*
4816  * Finds the next token in *buf, dynamically allocates a buffer big
4817 * enough to hold a copy of it, and copies the token into the new
4818 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4819 * that a duplicate buffer is created even for a zero-length token.
4821 * Returns a pointer to the newly-allocated duplicate, or a null
4822 * pointer if memory for the duplicate was not available. If
4823 * the lenp argument is a non-null pointer, the length of the token
4824 * (not including the '\0') is returned in *lenp.
4826 * If successful, the *buf pointer will be updated to point beyond
4827 * the end of the found token.
4829  * Note: uses GFP_KERNEL for allocation.
4830  */
4831 static inline char *dup_token(const char **buf, size_t *lenp)
4832 {
4833 char *dup;
4834 size_t len;
4836 len = next_token(buf);
4837 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4838 if (!dup)
4839 return NULL;
4840 *(dup + len) = '\0';
4841 *buf += len;
4843 if (lenp)
4844 *lenp = len;
4846 return dup;
4847 }
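/*
 * Editor's example (not part of the driver): with buf pointing at
 * "image1 snap1", dup_token(&buf, &len) returns a kmalloc'd copy of
 * "image1", sets len to 6, and leaves buf pointing at " snap1" (just
 * past the token), ready for the next call.
 */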
4849 /*
4850  * Parse the options provided for an "rbd add" (i.e., rbd image
4851  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4852  * and the data written is passed here via a NUL-terminated buffer.
4853  * Returns 0 if successful or an error code otherwise.
4855  * The information extracted from these options is recorded in
4856  * the other parameters which return dynamically-allocated
4857  * structures:
4858  *  ceph_opts
4859  *      The address of a pointer that will refer to a ceph options
4860  *      structure.  Caller must release the returned pointer using
4861  *      ceph_destroy_options() when it is no longer needed.
4862  *  rbd_opts
4863  *      Address of an rbd options pointer.  Fully initialized by
4864  *      this function; caller must release with kfree().
4865  *  spec
4866  *      Address of an rbd image specification pointer.  Fully
4867  *      initialized by this function based on parsed options.
4868  *      Caller must release with rbd_spec_put().
4870  * The options passed take this form:
4871  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4872  * where:
4873  *  <mon_addrs>
4874  *      A comma-separated list of one or more monitor addresses.
4875  *      A monitor address is an ip address, optionally followed
4876  *      by a port number (separated by a colon).
4877  *        I.e.:  ip1[:port1][,ip2[:port2]...]
4878  *  <options>
4879  *      A comma-separated list of ceph and/or rbd options.
4880  *  <pool_name>
4881  *      The name of the rados pool containing the rbd image.
4882  *  <image_name>
4883  *      The name of the image in that pool to map.
4884  *  <snap_id>
4885  *      An optional snapshot id.  If provided, the mapping will
4886  *      present data from the image at the time that snapshot was
4887  *      created.  The image head is used if no snapshot id is
4888  *      provided.  Snapshot mappings are always read-only.
4889  */
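/*
 * Editor's example (illustrative names and key only):
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage mysnap" \
 *       > /sys/bus/rbd/add
 *
 * maps snapshot "mysnap" of image "myimage" in pool "mypool" via the
 * monitor at 1.2.3.4:6789; omit "mysnap" to map the image head
 * read/write.
 */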
4890 static int rbd_add_parse_args(const char *buf,
4891 struct ceph_options **ceph_opts,
4892 struct rbd_options **opts,
4893 struct rbd_spec **rbd_spec)
4894 {
4895 size_t len;
4896 char *options;
4897 const char *mon_addrs;
4898 char *snap_name;
4899 size_t mon_addrs_size;
4900 struct rbd_spec *spec = NULL;
4901 struct rbd_options *rbd_opts = NULL;
4902 struct ceph_options *copts;
4903 int ret;
4905 /* The first four tokens are required */
4907 len = next_token(&buf);
4908 if (!len) {
4909 rbd_warn(NULL, "no monitor address(es) provided");
4910 return -EINVAL;
4911 }
4912 mon_addrs = buf;
4913 mon_addrs_size = len + 1;
4914 buf += len;
4916 ret = -EINVAL;
4917 options = dup_token(&buf, NULL);
4918 if (!options)
4919 return -ENOMEM;
4920 if (!*options) {
4921 rbd_warn(NULL, "no options provided");
4922 goto out_err;
4923 }
4925 spec = rbd_spec_alloc();
4926 if (!spec)
4927 goto out_mem;
4929 spec->pool_name = dup_token(&buf, NULL);
4930 if (!spec->pool_name)
4931 goto out_mem;
4932 if (!*spec->pool_name) {
4933 rbd_warn(NULL, "no pool name provided");
4934 goto out_err;
4935 }
4937 spec->image_name = dup_token(&buf, NULL);
4938 if (!spec->image_name)
4939 goto out_mem;
4940 if (!*spec->image_name) {
4941 rbd_warn(NULL, "no image name provided");
4942 goto out_err;
4943 }
4945 /*
4946  * Snapshot name is optional; default is to use "-"
4947  * (indicating the head/no snapshot).
4948  */
4949 len = next_token(&buf);
4950 if (!len) {
4951 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4952 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4953 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4954 ret = -ENAMETOOLONG;
4955 goto out_err;
4956 }
4957 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4958 if (!snap_name)
4959 goto out_mem;
4960 *(snap_name + len) = '\0';
4961 spec->snap_name = snap_name;
4963 /* Initialize all rbd options to the defaults */
4965 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4966 if (!rbd_opts)
4967 goto out_mem;
4969 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4971 copts = ceph_parse_options(options, mon_addrs,
4972 mon_addrs + mon_addrs_size - 1,
4973 parse_rbd_opts_token, rbd_opts);
4974 if (IS_ERR(copts)) {
4975 ret = PTR_ERR(copts);
4976 goto out_err;
4977 }
4978 kfree(options);
4980 *ceph_opts = copts;
4981 *opts = rbd_opts;
4982 *rbd_spec = spec;
4984 return 0;
4985 out_mem:
4986 ret = -ENOMEM;
4987 out_err:
4988 kfree(rbd_opts);
4989 rbd_spec_put(spec);
4990 kfree(options);
4992 return ret;
4993 }
4995 /*
4996  * Return pool id (>= 0) or a negative error code.
4997  */
4998 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4999 {
5000 u64 newest_epoch;
5001 unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
5002 int tries = 0;
5003 int ret;
5005 again:
5006 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5007 if (ret == -ENOENT && tries++ < 1) {
5008 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
5009 &newest_epoch);
5010 if (ret < 0)
5011 return ret;
5013 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5014 ceph_monc_request_next_osdmap(&rbdc->client->monc);
5015 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5016 newest_epoch, timeout);
5017 goto again;
5018 } else {
5019 /* the osdmap we have is new enough */
5020 return -ENOENT;
5021 }
5022 }
5024 return ret;
5025 }
5027 /*
5028  * An rbd format 2 image has a unique identifier, distinct from the
5029 * name given to it by the user. Internally, that identifier is
5030 * what's used to specify the names of objects related to the image.
5032 * A special "rbd id" object is used to map an rbd image name to its
5033 * id. If that object doesn't exist, then there is no v2 rbd image
5034 * with the supplied name.
5036 * This function will record the given rbd_dev's image_id field if
5037 * it can be determined, and in that case will return 0. If any
5038 * errors occur a negative errno will be returned and the rbd_dev's
5039  * image_id field will be unchanged (and should be NULL).
5040  */
5041 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5042 {
5043 int ret;
5044 size_t size;
5045 char *object_name;
5046 void *response;
5047 char *image_id;
5049 /*
5050 * When probing a parent image, the image id is already
5051 * known (and the image name likely is not). There's no
5052 * need to fetch the image id again in this case. We
5053  * do still need to set the image format though.
5054  */
5055 if (rbd_dev->spec->image_id) {
5056 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5057 return 0;
5058 }
5061 /*
5062  * First, see if the format 2 image id file exists, and if
5063  * so, get the image's persistent id from it.
5064  */
5065 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5066 object_name = kmalloc(size, GFP_NOIO);
5067 if (!object_name)
5068 return -ENOMEM;
5069 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5070 dout("rbd id object name is %s\n", object_name);
5072 /* Response will be an encoded string, which includes a length */
5074 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5075 response = kzalloc(size, GFP_NOIO);
5076 if (!response) {
5077 ret = -ENOMEM;
5078 goto out;
5079 }
5081 /* If it doesn't exist we'll assume it's a format 1 image */
5083 ret = rbd_obj_method_sync(rbd_dev, object_name,
5084 "rbd", "get_id", NULL, 0,
5085 response, RBD_IMAGE_ID_LEN_MAX);
5086 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5087 if (ret == -ENOENT) {
5088 image_id = kstrdup("", GFP_KERNEL);
5089 ret = image_id ? 0 : -ENOMEM;
5090 if (!ret)
5091 rbd_dev->image_format = 1;
5092 } else if (ret >= 0) {
5093 void *p = response;
5095 image_id = ceph_extract_encoded_string(&p, p + ret,
5096 NULL, GFP_NOIO);
5097 ret = PTR_ERR_OR_ZERO(image_id);
5098 if (!ret)
5099 rbd_dev->image_format = 2;
5100 }
5102 if (!ret) {
5103 rbd_dev->spec->image_id = image_id;
5104 dout("image_id is %s\n", image_id);
5105 }
5106 out:
5107 kfree(response);
5108 kfree(object_name);
5110 return ret;
5111 }
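/*
 * Editor's example: assuming RBD_ID_PREFIX is "rbd_id.", probing a
 * format 2 image named "myimage" reads the object "rbd_id.myimage",
 * whose get_id method returns the image's persistent id (for example
 * "1014b2ae8944a"); a missing id object means a format 1 image.
 */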
5113 /*
5114  * Undo whatever state changes are made by v1 or v2 header info
5115  * calls.
5116  */
5117 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5118 {
5119 struct rbd_image_header *header;
5121 /* Drop parent reference unless it's already been done (or none) */
5123 if (rbd_dev->parent_overlap)
5124 rbd_dev_parent_put(rbd_dev);
5126 /* Free dynamic fields from the header, then zero it out */
5128 header = &rbd_dev->header;
5129 ceph_put_snap_context(header->snapc);
5130 kfree(header->snap_sizes);
5131 kfree(header->snap_names);
5132 kfree(header->object_prefix);
5133 memset(header, 0, sizeof (*header));
5134 }
5136 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5137 {
5138 int ret;
5140 ret = rbd_dev_v2_object_prefix(rbd_dev);
5141 if (ret)
5142 goto out_err;
5144 /*
5145  * Get and check the features for the image.  Currently the
5146  * features are assumed to never change.
5147  */
5148 ret = rbd_dev_v2_features(rbd_dev);
5149 if (ret)
5150 goto out_err;
5152 /* If the image supports fancy striping, get its parameters */
5154 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5155 ret = rbd_dev_v2_striping_info(rbd_dev);
5156 if (ret < 0)
5157 goto out_err;
5158 }
5159 /* No support for crypto and compression type format 2 images */
5161 return 0;
5162 out_err:
5163 rbd_dev->header.features = 0;
5164 kfree(rbd_dev->header.object_prefix);
5165 rbd_dev->header.object_prefix = NULL;
5167 return ret;
5168 }
5170 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
5171 {
5172 struct rbd_device *parent = NULL;
5173 struct rbd_spec *parent_spec;
5174 struct rbd_client *rbdc;
5175 int ret;
5177 if (!rbd_dev->parent_spec)
5178 return 0;
5179 /*
5180 * We need to pass a reference to the client and the parent
5181 * spec when creating the parent rbd_dev. Images related by
5182  * parent/child relationships always share both.
5183  */
5184 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5185 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5187 ret = -ENOMEM;
5188 parent = rbd_dev_create(rbdc, parent_spec);
5189 if (!parent)
5190 goto out_err;
5192 ret = rbd_dev_image_probe(parent, false);
5193 if (ret < 0)
5194 goto out_err;
5195 rbd_dev->parent = parent;
5196 atomic_set(&rbd_dev->parent_ref, 1);
5198 return 0;
5199 out_err:
5200 if (parent) {
5201 rbd_dev_unparent(rbd_dev);
5202 kfree(rbd_dev->header_name);
5203 rbd_dev_destroy(parent);
5204 } else {
5205 rbd_put_client(rbdc);
5206 rbd_spec_put(parent_spec);
5207 }
5209 return ret;
5210 }
5212 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5213 {
5214 int ret;
5216 /* Get an id and fill in device name. */
5218 ret = rbd_dev_id_get(rbd_dev);
5219 if (ret)
5220 return ret;
5222 BUILD_BUG_ON(DEV_NAME_LEN
5223 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5224 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5226 /* Record our major and minor device numbers. */
5228 if (!single_major) {
5229 ret = register_blkdev(0, rbd_dev->name);
5230 if (ret < 0)
5231 goto err_out_id;
5233 rbd_dev->major = ret;
5234 rbd_dev->minor = 0;
5235 } else {
5236 rbd_dev->major = rbd_major;
5237 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5238 }
5240 /* Set up the blkdev mapping. */
5242 ret = rbd_init_disk(rbd_dev);
5243 if (ret)
5244 goto err_out_blkdev;
5246 ret = rbd_dev_mapping_set(rbd_dev);
5247 if (ret)
5248 goto err_out_disk;
5250 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5251 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5253 rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
5254 if (!rbd_dev->rq_wq) {
5255 ret = -ENOMEM;
5256 goto err_out_mapping;
5257 }
5259 ret = rbd_bus_add_dev(rbd_dev);
5260 if (ret)
5261 goto err_out_workqueue;
5263 /* Everything's ready. Announce the disk to the world. */
5265 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5266 add_disk(rbd_dev->disk);
5268 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5269 (unsigned long long) rbd_dev->mapping.size);
5271 return ret;
5273 err_out_workqueue:
5274 destroy_workqueue(rbd_dev->rq_wq);
5275 rbd_dev->rq_wq = NULL;
5276 err_out_mapping:
5277 rbd_dev_mapping_clear(rbd_dev);
5278 err_out_disk:
5279 rbd_free_disk(rbd_dev);
5280 err_out_blkdev:
5281 if (!single_major)
5282 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5283 err_out_id:
5284 rbd_dev_id_put(rbd_dev);
5285 rbd_dev_mapping_clear(rbd_dev);
5287 return ret;
5288 }
5290 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5291 {
5292 struct rbd_spec *spec = rbd_dev->spec;
5293 size_t size;
5295 /* Record the header object name for this rbd image. */
5297 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5299 if (rbd_dev->image_format == 1)
5300 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5301 else
5302 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5304 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5305 if (!rbd_dev->header_name)
5306 return -ENOMEM;
5308 if (rbd_dev->image_format == 1)
5309 sprintf(rbd_dev->header_name, "%s%s",
5310 spec->image_name, RBD_SUFFIX);
5311 else
5312 sprintf(rbd_dev->header_name, "%s%s",
5313 RBD_HEADER_PREFIX, spec->image_id);
5315 return 0;
5316 }
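/*
 * Editor's example: assuming RBD_SUFFIX is ".rbd" and RBD_HEADER_PREFIX
 * is "rbd_header.", a format 1 image named "myimage" uses the header
 * object "myimage.rbd", while a format 2 image with id "1014b2ae8944a"
 * uses "rbd_header.1014b2ae8944a".
 */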
5317 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5318 {
5319 rbd_dev_unprobe(rbd_dev);
5320 kfree(rbd_dev->header_name);
5321 rbd_dev->header_name = NULL;
5322 rbd_dev->image_format = 0;
5323 kfree(rbd_dev->spec->image_id);
5324 rbd_dev->spec->image_id = NULL;
5326 rbd_dev_destroy(rbd_dev);
5327 }
5329 /*
5330  * Probe for the existence of the header object for the given rbd
5331 * device. If this image is the one being mapped (i.e., not a
5332 * parent), initiate a watch on its header object before using that
5333  * object to get detailed information about the rbd image.
5334  */
5335 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5336 {
5337 int ret;
5339 /*
5340  * Get the id from the image id object.  Unless there's an
5341 * error, rbd_dev->spec->image_id will be filled in with
5342 * a dynamically-allocated string, and rbd_dev->image_format
5343  * will be set to either 1 or 2.
5344  */
5345 ret = rbd_dev_image_id(rbd_dev);
5346 if (ret)
5347 return ret;
5349 ret = rbd_dev_header_name(rbd_dev);
5350 if (ret)
5351 goto err_out_format;
5353 if (mapping) {
5354 ret = rbd_dev_header_watch_sync(rbd_dev);
5355 if (ret)
5356 goto out_header_name;
5357 }
5359 ret = rbd_dev_header_info(rbd_dev);
5360 if (ret)
5361 goto err_out_watch;
5363 /*
5364  * If this image is the one being mapped, we have pool name and
5365 * id, image name and id, and snap name - need to fill snap id.
5366 * Otherwise this is a parent image, identified by pool, image
5367  * and snap ids - need to fill in names for those ids.
5368  */
5369 if (mapping)
5370 ret = rbd_spec_fill_snap_id(rbd_dev);
5371 else
5372 ret = rbd_spec_fill_names(rbd_dev);
5373 if (ret)
5374 goto err_out_probe;
5376 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5377 ret = rbd_dev_v2_parent_info(rbd_dev);
5378 if (ret)
5379 goto err_out_probe;
5381 /*
5382  * Need to warn users if this image is the one being
5383  * mapped and has a parent.
5384  */
5385 if (mapping && rbd_dev->parent_spec)
5386 rbd_warn(rbd_dev,
5387 "WARNING: kernel layering is EXPERIMENTAL!");
5388 }
5390 ret = rbd_dev_probe_parent(rbd_dev);
5391 if (ret)
5392 goto err_out_probe;
5394 dout("discovered format %u image, header name is %s\n",
5395 rbd_dev->image_format, rbd_dev->header_name);
5397 return 0;
5398 err_out_probe:
5399 rbd_dev_unprobe(rbd_dev);
5400 err_out_watch:
5401 if (mapping)
5402 rbd_dev_header_unwatch_sync(rbd_dev);
5403 out_header_name:
5404 kfree(rbd_dev->header_name);
5405 rbd_dev->header_name = NULL;
5406 err_out_format:
5407 rbd_dev->image_format = 0;
5408 kfree(rbd_dev->spec->image_id);
5409 rbd_dev->spec->image_id = NULL;
5411 return ret;
5412 }
5413 static ssize_t do_rbd_add(struct bus_type *bus,
5414 const char *buf,
5415 size_t count)
5416 {
5417 struct rbd_device *rbd_dev = NULL;
5418 struct ceph_options *ceph_opts = NULL;
5419 struct rbd_options *rbd_opts = NULL;
5420 struct rbd_spec *spec = NULL;
5421 struct rbd_client *rbdc;
5422 bool read_only;
5423 int rc = -ENOMEM;
5425 if (!try_module_get(THIS_MODULE))
5426 return -ENODEV;
5428 /* parse add command */
5429 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5430 if (rc < 0)
5431 goto err_out_module;
5432 read_only = rbd_opts->read_only;
5433 kfree(rbd_opts);
5434 rbd_opts = NULL; /* done with this */
5436 rbdc = rbd_get_client(ceph_opts);
5437 if (IS_ERR(rbdc)) {
5438 rc = PTR_ERR(rbdc);
5439 goto err_out_args;
5440 }
5442 /* pick the pool */
5443 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5444 if (rc < 0)
5445 goto err_out_client;
5446 spec->pool_id = (u64)rc;
5448 /* The ceph file layout needs to fit pool id in 32 bits */
5450 if (spec->pool_id > (u64)U32_MAX) {
5451 rbd_warn(NULL, "pool id too large (%llu > %u)",
5452 (unsigned long long)spec->pool_id, U32_MAX);
5453 rc = -EIO;
5454 goto err_out_client;
5455 }
5457 rbd_dev = rbd_dev_create(rbdc, spec);
5458 if (!rbd_dev)
5459 goto err_out_client;
5460 rbdc = NULL; /* rbd_dev now owns this */
5461 spec = NULL; /* rbd_dev now owns this */
5463 rc = rbd_dev_image_probe(rbd_dev, true);
5464 if (rc < 0)
5465 goto err_out_rbd_dev;
5467 /* If we are mapping a snapshot it must be marked read-only */
5469 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5470 read_only = true;
5471 rbd_dev->mapping.read_only = read_only;
5473 rc = rbd_dev_device_setup(rbd_dev);
5474 if (rc) {
5475 /*
5476  * rbd_dev_header_unwatch_sync() can't be moved into
5477  * rbd_dev_image_release() without refactoring, see
5478  * commit 1f3ef78861ac.
5479  */
5480 rbd_dev_header_unwatch_sync(rbd_dev);
5481 rbd_dev_image_release(rbd_dev);
5482 goto err_out_module;
5483 }
5485 return count;
5487 err_out_rbd_dev:
5488 rbd_dev_destroy(rbd_dev);
5489 err_out_client:
5490 rbd_put_client(rbdc);
5491 err_out_args:
5492 rbd_spec_put(spec);
5493 err_out_module:
5494 module_put(THIS_MODULE);
5496 dout("Error adding device %s\n", buf);
5498 return (ssize_t)rc;
5499 }
5501 static ssize_t rbd_add(struct bus_type *bus,
5502 const char *buf,
5503 size_t count)
5504 {
5505 if (single_major)
5506 return -EINVAL;
5508 return do_rbd_add(bus, buf, count);
5509 }
5511 static ssize_t rbd_add_single_major(struct bus_type *bus,
5512 const char *buf,
5513 size_t count)
5514 {
5515 return do_rbd_add(bus, buf, count);
5516 }
5518 static void rbd_dev_device_release(struct device *dev)
5519 {
5520 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5522 destroy_workqueue(rbd_dev->rq_wq);
5523 rbd_free_disk(rbd_dev);
5524 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5525 rbd_dev_mapping_clear(rbd_dev);
5526 if (!single_major)
5527 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5528 rbd_dev_id_put(rbd_dev);
5529 rbd_dev_mapping_clear(rbd_dev);
5530 }
5532 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5533 {
5534 while (rbd_dev->parent) {
5535 struct rbd_device *first = rbd_dev;
5536 struct rbd_device *second = first->parent;
5537 struct rbd_device *third;
5539 /*
5540  * Follow to the parent with no grandparent and
5541  * remove it.
5542  */
5543 while (second && (third = second->parent)) {
5544 first = second;
5545 second = third;
5546 }
5547 rbd_assert(second);
5548 rbd_dev_image_release(second);
5549 first->parent = NULL;
5550 first->parent_overlap = 0;
5552 rbd_assert(first->parent_spec);
5553 rbd_spec_put(first->parent_spec);
5554 first->parent_spec = NULL;
5555 }
5556 }
5558 static ssize_t do_rbd_remove(struct bus_type *bus,
5559 const char *buf,
5560 size_t count)
5561 {
5562 struct rbd_device *rbd_dev = NULL;
5563 struct list_head *tmp;
5564 int dev_id;
5565 unsigned long ul;
5566 bool already = false;
5567 int ret;
5569 ret = kstrtoul(buf, 10, &ul);
5570 if (ret)
5571 return ret;
5573 /* convert to int; abort if we lost anything in the conversion */
5574 dev_id = (int)ul;
5575 if (dev_id != ul)
5576 return -EINVAL;
5578 ret = -ENOENT;
5579 spin_lock(&rbd_dev_list_lock);
5580 list_for_each(tmp, &rbd_dev_list) {
5581 rbd_dev = list_entry(tmp, struct rbd_device, node);
5582 if (rbd_dev->dev_id == dev_id) {
5583 ret = 0;
5584 break;
5585 }
5586 }
5587 if (!ret) {
5588 spin_lock_irq(&rbd_dev->lock);
5589 if (rbd_dev->open_count)
5590 ret = -EBUSY;
5591 else
5592 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5593 &rbd_dev->flags);
5594 spin_unlock_irq(&rbd_dev->lock);
5595 }
5596 spin_unlock(&rbd_dev_list_lock);
5597 if (ret < 0 || already)
5598 return ret;
5600 rbd_dev_header_unwatch_sync(rbd_dev);
5601 /*
5602 * flush remaining watch callbacks - these must be complete
5603  * before the osd_client is shutdown
5604  */
5605 dout("%s: flushing notifies", __func__);
5606 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5608 /*
5609 * Don't free anything from rbd_dev->disk until after all
5610 * notifies are completely processed. Otherwise
5611 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5612  * in a potential use after free of rbd_dev->disk or rbd_dev.
5613  */
5614 rbd_bus_del_dev(rbd_dev);
5615 rbd_dev_image_release(rbd_dev);
5616 module_put(THIS_MODULE);
5618 return count;
5619 }
5621 static ssize_t rbd_remove(struct bus_type *bus,
5622 const char *buf,
5623 size_t count)
5624 {
5625 if (single_major)
5626 return -EINVAL;
5628 return do_rbd_remove(bus, buf, count);
5629 }
5631 static ssize_t rbd_remove_single_major(struct bus_type *bus,
5632 const char *buf,
5633 size_t count)
5634 {
5635 return do_rbd_remove(bus, buf, count);
5636 }
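/*
 * Editor's example: writing a device id to the bus control file unmaps
 * the corresponding device, e.g.
 *
 *   $ echo 1 > /sys/bus/rbd/remove
 *
 * removes /dev/rbd1, provided it is not open and not already being
 * removed.
 */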
5638 /*
5639  * create control files in sysfs
5640  * /sys/bus/rbd/...
5641  */
5642 static int rbd_sysfs_init(void)
5643 {
5644 int ret;
5646 ret = device_register(&rbd_root_dev);
5647 if (ret < 0)
5648 return ret;
5650 ret = bus_register(&rbd_bus_type);
5651 if (ret < 0)
5652 device_unregister(&rbd_root_dev);
5654 return ret;
5655 }
5657 static void rbd_sysfs_cleanup(void)
5658 {
5659 bus_unregister(&rbd_bus_type);
5660 device_unregister(&rbd_root_dev);
5661 }
5663 static int rbd_slab_init(void)
5664 {
5665 rbd_assert(!rbd_img_request_cache);
5666 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5667 sizeof (struct rbd_img_request),
5668 __alignof__(struct rbd_img_request),
5669 0, NULL);
5670 if (!rbd_img_request_cache)
5671 return -ENOMEM;
5673 rbd_assert(!rbd_obj_request_cache);
5674 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5675 sizeof (struct rbd_obj_request),
5676 __alignof__(struct rbd_obj_request),
5677 0, NULL);
5678 if (!rbd_obj_request_cache)
5679 goto out_err;
5681 rbd_assert(!rbd_segment_name_cache);
5682 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5683 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5684 if (rbd_segment_name_cache)
5685 return 0;
5686 out_err:
5687 if (rbd_obj_request_cache) {
5688 kmem_cache_destroy(rbd_obj_request_cache);
5689 rbd_obj_request_cache = NULL;
5690 }
5692 kmem_cache_destroy(rbd_img_request_cache);
5693 rbd_img_request_cache = NULL;
5695 return -ENOMEM;
5696 }
5698 static void rbd_slab_exit(void)
5699 {
5700 rbd_assert(rbd_segment_name_cache);
5701 kmem_cache_destroy(rbd_segment_name_cache);
5702 rbd_segment_name_cache = NULL;
5704 rbd_assert(rbd_obj_request_cache);
5705 kmem_cache_destroy(rbd_obj_request_cache);
5706 rbd_obj_request_cache = NULL;
5708 rbd_assert(rbd_img_request_cache);
5709 kmem_cache_destroy(rbd_img_request_cache);
5710 rbd_img_request_cache = NULL;
5711 }
5713 static int __init rbd_init(void)
5714 {
5715 int rc;
5717 if (!libceph_compatible(NULL)) {
5718 rbd_warn(NULL, "libceph incompatibility (quitting)");
5719 return -EINVAL;
5720 }
5722 rc = rbd_slab_init();
5723 if (rc)
5724 return rc;
5726 if (single_major) {
5727 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5728 if (rbd_major < 0) {
5729 rc = rbd_major;
5730 goto err_out_slab;
5731 }
5732 }
5734 rc = rbd_sysfs_init();
5735 if (rc)
5736 goto err_out_blkdev;
5738 if (single_major)
5739 pr_info("loaded (major %d)\n", rbd_major);
5740 else
5741 pr_info("loaded\n");
5743 return 0;
5745 err_out_blkdev:
5746 if (single_major)
5747 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5748 err_out_slab:
5749 rbd_slab_exit();
5750 return rc;
5751 }
5753 static void __exit rbd_exit(void)
5754 {
5755 ida_destroy(&rbd_dev_id_ida);
5756 rbd_sysfs_cleanup();
5757 if (single_major)
5758 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5759 rbd_slab_exit();
5760 }
5762 module_init(rbd_init);
5763 module_exit(rbd_exit);
5765 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5766 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5767 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5768 /* following authorship retained from original osdblk.c */
5769 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5771 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5772 MODULE_LICENSE("GPL");