/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
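/*
 * Illustrative example (not part of the original driver): converting
 * between 512-byte sectors and bytes with the symbols above.  A 4 KiB
 * request is 4096 >> SECTOR_SHIFT == 8 sectors, and 8 sectors cover
 * 8 << SECTOR_SHIFT == 4096 bytes.
 */
static inline u64 example_sectors_to_bytes(u64 nr_sectors)
{
	return nr_sectors << SECTOR_SHIFT;	/* 8 -> 4096 */
}

static inline u64 example_bytes_to_sectors(u64 nr_bytes)
{
	return nr_bytes >> SECTOR_SHIFT;	/* 4096 -> 8 */
}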
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
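/*
 * Illustrative example (not part of the original driver): these
 * saturating helpers suit reference counts such as the parent image
 * reference (see rbd_dev_parent_get() below).  A hypothetical "get"
 * succeeds only while the counter is strictly positive; once the
 * counter saturates (or hits zero), the caller backs off.
 */
static bool example_ref_get(atomic_t *ref)
{
	return atomic_inc_return_safe(ref) > 0;	/* false once pinned */
}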
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
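/*
 * Rough arithmetic behind the 510 limit (an illustration, not from
 * the original source): 510 snapshot ids at 8 bytes each is 4080
 * bytes, which leaves room for the struct ceph_snap_context header
 * within a single 4 KiB page.
 */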
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
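/*
 * Illustrative example (not part of the original driver): a mapping
 * must be refused when an image was created with feature bits this
 * client does not implement, since honoring every advertised feature
 * is required for correctness.
 */
static bool example_features_supported(u64 image_features)
{
	u64 unknown = image_features & ~RBD_FEATURES_SUPPORTED;

	return unknown == 0;	/* any unknown bit fails the mapping */
}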
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * An instance of the client; multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	u64			object_no;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
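/*
 * Illustrative example (not part of the original driver): walking an
 * image request's object requests with the iterator macros above.
 * The _safe variant (used by rbd_img_request_destroy() below) also
 * permits deleting entries while iterating.
 */
static u32 example_count_obj_requests(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 count = 0;

	for_each_obj_request(img_request, obj_request)
		count++;
	return count;	/* equals img_request->obj_request_count */
}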
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;

	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	struct device		dev;

	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires version 0.75 or
 * later of the userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
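/*
 * Worked example (illustrative, not from the original source): with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device owns 16 minors, so
 * dev_id 3 maps to minor 3 << 4 == 48, and minors 48..63 address the
 * whole device plus up to 15 partitions.  minor_to_rbd_dev_id() maps
 * any of 48..63 back to dev_id 3.
 */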
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_last_int, NULL},
	/* int args above */
	{Opt_last_string, NULL},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		rbd_opts->exclusive = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock to remove the
 * client from the list, so the caller must not already hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop a reference to a ceph client node.  If it's not referenced
 * anymore, release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
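/*
 * Worked example (illustrative, not from the original source): with a
 * descending snapshot array {18, 12, 5}, a bsearch() using this
 * comparator for id 12 lands on index 1; searching for id 7 finds
 * nothing, and rbd_dev_snap_index() below returns BAD_SNAP_INDEX.
 */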
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
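/*
 * Worked example (illustrative, not from the original source): with
 * 4 MiB objects (obj_order 22), image byte offset 5 MiB falls 1 MiB
 * into object 1, so rbd_segment_offset() returns 0x100000, and a
 * 4 MiB request starting there is clipped by rbd_segment_length() to
 * the 3 MiB remaining in that object.
 */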
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
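/*
 * Worked example (illustrative, not from the original source): for a
 * chain of two 512-byte segments, start_ofs == 512 skips the first
 * segment (pos + bv.bv_len == start_ofs) and zeroes the second in
 * full; start_ofs == 256 would zero the tail half of the first
 * segment and all of the second.
 */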
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
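/*
 * Worked example (illustrative, not from the original source): with
 * 4 MiB objects and parent_overlap == 5 MiB, round_up() yields 8 MiB,
 * so object requests starting below 8 MiB (objects 0 and 1) are
 * treated as possibly overlapping the parent; the partially covered
 * second object still needs parent data for its first 1 MiB.
 */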
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->object_no, obj_request->offset,
	     obj_request->length, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it is not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
{
	obj_request->result = err;
	obj_request->xferred = 0;
	/*
	 * kludge - mirror rbd_obj_request_submit() to match a put in
	 * rbd_img_obj_callback()
	 */
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	obj_request_done_set(obj_request);
	rbd_obj_request_complete(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d",
			 obj_request->object_no, opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	ktime_get_real_ts(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->offset;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int num_ops, unsigned int flags,
		     struct rbd_obj_request *obj_request)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_flags = flags;
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_request;

	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_request->object_no))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE)
			rbd_assert(img_request_write_test(img_request));
		else
			rbd_assert(img_request_discard_test(img_request));

		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	return __rbd_osd_req_create(rbd_dev, snapc, num_ops,
	    (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ?
	    CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request);
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	return __rbd_osd_req_create(img_request->rbd_dev,
				    img_request->snapc, num_osd_ops,
				    CEPH_OSD_FLAG_WRITE, obj_request);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
static struct rbd_obj_request *
rbd_obj_request_create(enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		/* img_data requests don't own their page array */
		if (obj_request->pages &&
		    !obj_request_img_data_test(obj_request))
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
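/*
 * Illustrative sketch (not part of the original driver): the typical
 * life cycle of an image request.  This hypothetical helper creates a
 * read request, relies on the caller to attach the object requests
 * covering the span, and submits it; on failure the initial reference
 * is dropped with rbd_img_request_put().
 */
static int example_submit_read(struct rbd_device *rbd_dev,
			       u64 offset, u64 length)
{
	struct rbd_img_request *img_request;
	int ret;

	img_request = rbd_img_request_create(rbd_dev, offset, length,
					     OBJ_OP_READ, NULL);
	if (!img_request)
		return -ENOMEM;

	/* ... attach object requests for [offset, offset + length) ... */

	ret = rbd_img_request_submit(img_request);
	if (ret)
		rbd_img_request_put(img_request);
	return ret;
}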
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
2254 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2256 struct rbd_img_request *img_request;
2257 unsigned int xferred;
2261 rbd_assert(obj_request_img_data_test(obj_request));
2262 img_request = obj_request->img_request;
2264 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2265 xferred = (unsigned int)obj_request->xferred;
2266 result = obj_request->result;
2267 if (result) {
2268 struct rbd_device *rbd_dev = img_request->rbd_dev;
2269 enum obj_operation_type op_type;
2271 if (img_request_discard_test(img_request))
2272 op_type = OBJ_OP_DISCARD;
2273 else if (img_request_write_test(img_request))
2274 op_type = OBJ_OP_WRITE;
2276 op_type = OBJ_OP_READ;
2278 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2279 obj_op_name(op_type), obj_request->length,
2280 obj_request->img_offset, obj_request->offset);
2281 rbd_warn(rbd_dev, " result %d xferred %x",
2282 result, xferred);
2283 if (!img_request->result)
2284 img_request->result = result;
2286 * Need to end I/O on the entire obj_request worth of
2287 * bytes in case of error.
2289 xferred = obj_request->length;
2292 if (img_request_child_test(img_request)) {
2293 rbd_assert(img_request->obj_request != NULL);
2294 more = obj_request->which < img_request->obj_request_count - 1;
2295 } else {
2296 rbd_assert(img_request->rq != NULL);
2298 more = blk_update_request(img_request->rq, result, xferred);
2299 if (!more)
2300 __blk_mq_end_request(img_request->rq, result);
2303 return more;
2306 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2308 struct rbd_img_request *img_request;
2309 u32 which = obj_request->which;
2312 rbd_assert(obj_request_img_data_test(obj_request));
2313 img_request = obj_request->img_request;
2315 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2316 rbd_assert(img_request != NULL);
2317 rbd_assert(img_request->obj_request_count > 0);
2318 rbd_assert(which != BAD_WHICH);
2319 rbd_assert(which < img_request->obj_request_count);
2321 spin_lock_irq(&img_request->completion_lock);
2322 if (which != img_request->next_completion)
2323 goto out;
2325 for_each_obj_request_from(img_request, obj_request) {
2327 rbd_assert(which < img_request->obj_request_count);
2329 if (!obj_request_done_test(obj_request))
2330 break;
2331 more = rbd_img_obj_end_request(obj_request);
2332 which++;
2335 rbd_assert(more ^ (which == img_request->obj_request_count));
2336 img_request->next_completion = which;
2337 out:
2338 spin_unlock_irq(&img_request->completion_lock);
2339 rbd_img_request_put(img_request);
2341 if (!more)
2342 rbd_img_request_complete(img_request);
2346 * Add individual osd ops to the given ceph_osd_request and prepare
2347 * them for submission. num_ops is the current number of
2348 * osd operations already added to the object request.
2350 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2351 struct ceph_osd_request *osd_request,
2352 enum obj_operation_type op_type,
2353 unsigned int num_ops)
2355 struct rbd_img_request *img_request = obj_request->img_request;
2356 struct rbd_device *rbd_dev = img_request->rbd_dev;
2357 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2358 u64 offset = obj_request->offset;
2359 u64 length = obj_request->length;
2363 if (op_type == OBJ_OP_DISCARD) {
2364 if (!offset && length == object_size &&
2365 (!img_request_layered_test(img_request) ||
2366 !obj_request_overlaps_parent(obj_request))) {
2367 opcode = CEPH_OSD_OP_DELETE;
2368 } else if (offset + length == object_size) {
2369 opcode = CEPH_OSD_OP_TRUNCATE;
2370 } else {
2371 down_read(&rbd_dev->header_rwsem);
2372 img_end = rbd_dev->header.image_size;
2373 up_read(&rbd_dev->header_rwsem);
2375 if (obj_request->img_offset + length == img_end)
2376 opcode = CEPH_OSD_OP_TRUNCATE;
2377 else
2378 opcode = CEPH_OSD_OP_ZERO;
2380 } else if (op_type == OBJ_OP_WRITE) {
2381 if (!offset && length == object_size)
2382 opcode = CEPH_OSD_OP_WRITEFULL;
2384 opcode = CEPH_OSD_OP_WRITE;
2385 osd_req_op_alloc_hint_init(osd_request, num_ops,
2386 object_size, object_size);
2389 opcode = CEPH_OSD_OP_READ;
2392 if (opcode == CEPH_OSD_OP_DELETE)
2393 osd_req_op_init(osd_request, num_ops, opcode, 0);
2394 else
2395 osd_req_op_extent_init(osd_request, num_ops, opcode,
2396 offset, length, 0, 0);
2398 if (obj_request->type == OBJ_REQUEST_BIO)
2399 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2400 obj_request->bio_list, length);
2401 else if (obj_request->type == OBJ_REQUEST_PAGES)
2402 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2403 obj_request->pages, length,
2404 offset & ~PAGE_MASK, false, false);
2406 /* Discards are also writes */
2407 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2408 rbd_osd_req_format_write(obj_request);
2410 rbd_osd_req_format_read(obj_request);
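/*
 * To summarize the opcode selection above:
 *
 *   discard, whole object, no parent data to keep -> CEPH_OSD_OP_DELETE
 *   discard reaching the end of the object/image  -> CEPH_OSD_OP_TRUNCATE
 *   any other discard                             -> CEPH_OSD_OP_ZERO
 *   write covering the whole object               -> CEPH_OSD_OP_WRITEFULL
 *   any other write                               -> CEPH_OSD_OP_WRITE
 *   everything else                               -> CEPH_OSD_OP_READ
 */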
2414 * Split up an image request into one or more object requests, each
2415 * to a different object. The "type" parameter indicates whether
2416 * "data_desc" is the pointer to the head of a list of bio
2417 * structures, or the base of a page array. In either case this
2418 * function assumes data_desc describes memory sufficient to hold
2419 * all data described by the image request.
2421 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2422 enum obj_request_type type,
2425 struct rbd_device *rbd_dev = img_request->rbd_dev;
2426 struct rbd_obj_request *obj_request = NULL;
2427 struct rbd_obj_request *next_obj_request;
2428 struct bio *bio_list = NULL;
2429 unsigned int bio_offset = 0;
2430 struct page **pages = NULL;
2431 enum obj_operation_type op_type;
2435 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2436 (int)type, data_desc);
2438 img_offset = img_request->offset;
2439 resid = img_request->length;
2440 rbd_assert(resid > 0);
2441 op_type = rbd_img_request_op_type(img_request);
2443 if (type == OBJ_REQUEST_BIO) {
2444 bio_list = data_desc;
2445 rbd_assert(img_offset ==
2446 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2447 } else if (type == OBJ_REQUEST_PAGES) {
2448 pages = data_desc;
2449 }
2451 while (resid) {
2452 struct ceph_osd_request *osd_req;
2453 u64 object_no = img_offset >> rbd_dev->header.obj_order;
2454 u64 offset = rbd_segment_offset(rbd_dev, img_offset);
2455 u64 length = rbd_segment_length(rbd_dev, img_offset, resid);
2457 obj_request = rbd_obj_request_create(type);
2461 obj_request->object_no = object_no;
2462 obj_request->offset = offset;
2463 obj_request->length = length;
2466 * set obj_request->img_request before creating the
2467 * osd_request so that it gets the right snapc
2469 rbd_img_obj_request_add(img_request, obj_request);
2471 if (type == OBJ_REQUEST_BIO) {
2472 unsigned int clone_size;
2474 rbd_assert(length <= (u64)UINT_MAX);
2475 clone_size = (unsigned int)length;
2476 obj_request->bio_list =
2477 bio_chain_clone_range(&bio_list,
2481 if (!obj_request->bio_list)
2482 goto out_unwind;
2483 } else if (type == OBJ_REQUEST_PAGES) {
2484 unsigned int page_count;
2486 obj_request->pages = pages;
2487 page_count = (u32)calc_pages_for(offset, length);
2488 obj_request->page_count = page_count;
2489 if ((offset + length) & ~PAGE_MASK)
2490 page_count--; /* more on last page */
2491 pages += page_count;
2494 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2495 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2496 obj_request);
2497 if (!osd_req)
2498 goto out_unwind;
2500 obj_request->osd_req = osd_req;
2501 obj_request->callback = rbd_img_obj_callback;
2502 obj_request->img_offset = img_offset;
2504 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2506 img_offset += length;
2507 resid -= length;
2510 return 0;
2512 out_unwind:
2513 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2514 rbd_img_obj_request_del(img_request, obj_request);
2516 return -ENOMEM;
2520 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2522 struct rbd_img_request *img_request;
2523 struct rbd_device *rbd_dev;
2524 struct page **pages;
2527 dout("%s: obj %p\n", __func__, obj_request);
2529 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2530 obj_request->type == OBJ_REQUEST_NODATA);
2531 rbd_assert(obj_request_img_data_test(obj_request));
2532 img_request = obj_request->img_request;
2533 rbd_assert(img_request);
2535 rbd_dev = img_request->rbd_dev;
2536 rbd_assert(rbd_dev);
2538 pages = obj_request->copyup_pages;
2539 rbd_assert(pages != NULL);
2540 obj_request->copyup_pages = NULL;
2541 page_count = obj_request->copyup_page_count;
2542 rbd_assert(page_count);
2543 obj_request->copyup_page_count = 0;
2544 ceph_release_page_vector(pages, page_count);
2547 * We want the transfer count to reflect the size of the
2548 * original write request. There is no such thing as a
2549 * successful short write, so if the request was successful
2550 * we can just set it to the originally-requested length.
2552 if (!obj_request->result)
2553 obj_request->xferred = obj_request->length;
2555 obj_request_done_set(obj_request);
2559 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2561 struct rbd_obj_request *orig_request;
2562 struct ceph_osd_request *osd_req;
2563 struct rbd_device *rbd_dev;
2564 struct page **pages;
2565 enum obj_operation_type op_type;
2570 rbd_assert(img_request_child_test(img_request));
2572 /* First get what we need from the image request */
2574 pages = img_request->copyup_pages;
2575 rbd_assert(pages != NULL);
2576 img_request->copyup_pages = NULL;
2577 page_count = img_request->copyup_page_count;
2578 rbd_assert(page_count);
2579 img_request->copyup_page_count = 0;
2581 orig_request = img_request->obj_request;
2582 rbd_assert(orig_request != NULL);
2583 rbd_assert(obj_request_type_valid(orig_request->type));
2584 img_result = img_request->result;
2585 parent_length = img_request->length;
2586 rbd_assert(img_result || parent_length == img_request->xferred);
2587 rbd_img_request_put(img_request);
2589 rbd_assert(orig_request->img_request);
2590 rbd_dev = orig_request->img_request->rbd_dev;
2591 rbd_assert(rbd_dev);
2594 * If the overlap has become 0 (most likely because the
2595 * image has been flattened) we need to free the pages
2596 * and re-submit the original write request.
2598 if (!rbd_dev->parent_overlap) {
2599 ceph_release_page_vector(pages, page_count);
2600 rbd_obj_request_submit(orig_request);
2608 * The original osd request is of no use to us any more.
2609 * We need a new one that can hold the three ops in a copyup
2610 * request. Allocate the new copyup osd request for the
2611 * original request, and release the old one.
2613 img_result = -ENOMEM;
2614 osd_req = rbd_osd_req_create_copyup(orig_request);
2615 if (!osd_req)
2616 goto out_err;
2617 rbd_osd_req_destroy(orig_request->osd_req);
2618 orig_request->osd_req = osd_req;
2619 orig_request->copyup_pages = pages;
2620 orig_request->copyup_page_count = page_count;
2622 /* Initialize the copyup op */
2624 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2625 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2626 false, false);
2628 /* Add the other op(s) */
2630 op_type = rbd_img_request_op_type(orig_request->img_request);
2631 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2633 /* All set, send it off. */
2635 rbd_obj_request_submit(orig_request);
2636 return;
2638 out_err:
2639 ceph_release_page_vector(pages, page_count);
2640 rbd_obj_request_error(orig_request, img_result);
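/*
 * At this point the copyup sequence for a layered write is in place:
 * the completed parent read has left the object's backing data in
 * "pages", op 0 of the new osd request calls the "rbd" class's
 * "copyup" method carrying that data, and the original write or
 * discard op(s) follow it.  The OSD instantiates the clone's object
 * from the parent data first, then applies the guest's change on top.
 */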
2644 * Read from the parent image the range of data that covers the
2645 * entire target of the given object request. This is used for
2646 * satisfying a layered image write request when the target of an
2647 * object request from the image request does not exist.
2649 * A page array big enough to hold the returned data is allocated
2650 * and supplied to rbd_img_request_fill() as the "data descriptor."
2651 * When the read completes, this page array will be transferred to
2652 * the original object request for the copyup operation.
2654 * If an error occurs, it is recorded as the result of the original
2655 * object request in rbd_img_obj_exists_callback().
2657 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2659 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2660 struct rbd_img_request *parent_request = NULL;
2663 struct page **pages = NULL;
2667 rbd_assert(rbd_dev->parent != NULL);
2670 * Determine the byte range covered by the object in the
2671 * child image to which the original request was to be sent.
2673 img_offset = obj_request->img_offset - obj_request->offset;
2674 length = rbd_obj_bytes(&rbd_dev->header);
2677 * There is no defined parent data beyond the parent
2678 * overlap, so limit what we read at that boundary if
2679 * necessary.
2681 if (img_offset + length > rbd_dev->parent_overlap) {
2682 rbd_assert(img_offset < rbd_dev->parent_overlap);
2683 length = rbd_dev->parent_overlap - img_offset;
2687 * Allocate a page array big enough to receive the data read
2688 * from the parent.
2690 page_count = (u32)calc_pages_for(0, length);
2691 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2692 if (IS_ERR(pages)) {
2693 result = PTR_ERR(pages);
2694 pages = NULL;
2695 goto out_err;
2699 parent_request = rbd_parent_request_create(obj_request,
2700 img_offset, length);
2701 if (!parent_request)
2702 goto out_err;
2704 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2705 if (result)
2706 goto out_err;
2708 parent_request->copyup_pages = pages;
2709 parent_request->copyup_page_count = page_count;
2710 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2712 result = rbd_img_request_submit(parent_request);
2713 if (!result)
2714 return 0;
2716 parent_request->copyup_pages = NULL;
2717 parent_request->copyup_page_count = 0;
2718 parent_request->obj_request = NULL;
2719 rbd_obj_request_put(obj_request);
2720 out_err:
2721 if (pages)
2722 ceph_release_page_vector(pages, page_count);
2723 if (parent_request)
2724 rbd_img_request_put(parent_request);
2725 return result;
2728 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2730 struct rbd_obj_request *orig_request;
2731 struct rbd_device *rbd_dev;
2734 rbd_assert(!obj_request_img_data_test(obj_request));
2737 * All we need from the object request is the original
2738 * request and the result of the STAT op. Grab those, then
2739 * we're done with the request.
2741 orig_request = obj_request->obj_request;
2742 obj_request->obj_request = NULL;
2743 rbd_obj_request_put(orig_request);
2744 rbd_assert(orig_request);
2745 rbd_assert(orig_request->img_request);
2747 result = obj_request->result;
2748 obj_request->result = 0;
2750 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2751 obj_request, orig_request, result,
2752 obj_request->xferred, obj_request->length);
2753 rbd_obj_request_put(obj_request);
2756 * If the overlap has become 0 (most likely because the
2757 * image has been flattened) we need to re-submit the
2760 rbd_dev = orig_request->img_request->rbd_dev;
2761 if (!rbd_dev->parent_overlap) {
2762 rbd_obj_request_submit(orig_request);
2767 * Our only purpose here is to determine whether the object
2768 * exists, and we don't want to treat the non-existence as
2769 * an error. If something else comes back, transfer the
2770 * error to the original request and complete it now.
2772 if (!result) {
2773 obj_request_existence_set(orig_request, true);
2774 } else if (result == -ENOENT) {
2775 obj_request_existence_set(orig_request, false);
2776 } else {
2777 goto fail_orig_request;
2781 * Resubmit the original request now that we have recorded
2782 * whether the target object exists.
2784 result = rbd_img_obj_request_submit(orig_request);
2785 if (result)
2786 goto fail_orig_request;
2788 return;
2790 fail_orig_request:
2791 rbd_obj_request_error(orig_request, result);
2794 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2796 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2797 struct rbd_obj_request *stat_request;
2798 struct page **pages;
2803 stat_request = rbd_obj_request_create(OBJ_REQUEST_PAGES);
2804 if (!stat_request)
2805 return -ENOMEM;
2807 stat_request->object_no = obj_request->object_no;
2809 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2811 if (!stat_request->osd_req) {
2812 ret = -ENOMEM;
2813 goto fail_stat_request;
2817 * The response data for a STAT call consists of:
2818 *     le64 length;
2819 *     struct {
2820 *         le32 tv_sec;
2821 *         le32 tv_nsec;
2822 *     } mtime;
2824 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2825 page_count = (u32)calc_pages_for(0, size);
2826 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2827 if (IS_ERR(pages)) {
2828 ret = PTR_ERR(pages);
2829 goto fail_stat_request;
2832 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2833 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2836 rbd_obj_request_get(obj_request);
2837 stat_request->obj_request = obj_request;
2838 stat_request->pages = pages;
2839 stat_request->page_count = page_count;
2840 stat_request->callback = rbd_img_obj_exists_callback;
2842 rbd_obj_request_submit(stat_request);
2843 return 0;
2845 fail_stat_request:
2846 rbd_obj_request_put(stat_request);
2847 return ret;
2850 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2852 struct rbd_img_request *img_request = obj_request->img_request;
2853 struct rbd_device *rbd_dev = img_request->rbd_dev;
2856 if (!img_request_write_test(img_request) &&
2857 !img_request_discard_test(img_request))
2858 return true;
2860 /* Non-layered writes */
2861 if (!img_request_layered_test(img_request))
2862 return true;
2865 * Layered writes outside of the parent overlap range don't
2866 * share any data with the parent.
2868 if (!obj_request_overlaps_parent(obj_request))
2869 return true;
2872 * Entire-object layered writes - we will overwrite whatever
2873 * parent data there is anyway.
2875 if (!obj_request->offset &&
2876 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2877 return true;
2880 * If the object is known to already exist, its parent data has
2881 * already been copied.
2883 if (obj_request_known_test(obj_request) &&
2884 obj_request_exists_test(obj_request))
2885 return true;
2887 return false;
2890 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2892 rbd_assert(obj_request_img_data_test(obj_request));
2893 rbd_assert(obj_request_type_valid(obj_request->type));
2894 rbd_assert(obj_request->img_request);
2896 if (img_obj_request_simple(obj_request)) {
2897 rbd_obj_request_submit(obj_request);
2898 return 0;
2902 * It's a layered write. The target object might exist but
2903 * we may not know that yet. If we know it doesn't exist,
2904 * start by reading the data for the full target object from
2905 * the parent so we can use it for a copyup to the target.
2907 if (obj_request_known_test(obj_request))
2908 return rbd_img_obj_parent_read_full(obj_request);
2910 /* We don't know whether the target exists. Go find out. */
2912 return rbd_img_obj_exists_submit(obj_request);
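/*
 * In short, an object request takes one of three submission paths:
 *
 *   1. simple (read, non-layered write, etc.)  - submitted directly
 *   2. layered write, existence already known  - parent data is read
 *      in full and the write becomes a copyup
 *   3. layered write, existence unknown        - a STAT probe is sent
 *      first; its callback records the result and re-enters this
 *      function
 */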
2915 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2917 struct rbd_obj_request *obj_request;
2918 struct rbd_obj_request *next_obj_request;
2921 dout("%s: img %p\n", __func__, img_request);
2923 rbd_img_request_get(img_request);
2924 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2925 ret = rbd_img_obj_request_submit(obj_request);
2926 if (ret)
2927 goto out_put_ireq;
2930 out_put_ireq:
2931 rbd_img_request_put(img_request);
2932 return ret;
2935 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2937 struct rbd_obj_request *obj_request;
2938 struct rbd_device *rbd_dev;
2943 rbd_assert(img_request_child_test(img_request));
2945 /* First get what we need from the image request and release it */
2947 obj_request = img_request->obj_request;
2948 img_xferred = img_request->xferred;
2949 img_result = img_request->result;
2950 rbd_img_request_put(img_request);
2953 * If the overlap has become 0 (most likely because the
2954 * image has been flattened) we need to re-submit the
2957 rbd_assert(obj_request);
2958 rbd_assert(obj_request->img_request);
2959 rbd_dev = obj_request->img_request->rbd_dev;
2960 if (!rbd_dev->parent_overlap) {
2961 rbd_obj_request_submit(obj_request);
2962 return;
2965 obj_request->result = img_result;
2966 if (obj_request->result)
2967 goto out;
2970 * We need to zero anything beyond the parent overlap
2971 * boundary. Since rbd_img_obj_request_read_callback()
2972 * will zero anything beyond the end of a short read, an
2973 * easy way to do this is to pretend the data from the
2974 * parent came up short--ending at the overlap boundary.
2976 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2977 obj_end = obj_request->img_offset + obj_request->length;
2978 if (obj_end > rbd_dev->parent_overlap) {
2979 u64 xferred = 0;
2981 if (obj_request->img_offset < rbd_dev->parent_overlap)
2982 xferred = rbd_dev->parent_overlap -
2983 obj_request->img_offset;
2985 obj_request->xferred = min(img_xferred, xferred);
2986 } else {
2987 obj_request->xferred = img_xferred;
2989 out:
2990 rbd_img_obj_request_read_callback(obj_request);
2991 rbd_obj_request_complete(obj_request);
2994 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2996 struct rbd_img_request *img_request;
2999 rbd_assert(obj_request_img_data_test(obj_request));
3000 rbd_assert(obj_request->img_request != NULL);
3001 rbd_assert(obj_request->result == (s32) -ENOENT);
3002 rbd_assert(obj_request_type_valid(obj_request->type));
3004 /* rbd_read_finish(obj_request, obj_request->length); */
3005 img_request = rbd_parent_request_create(obj_request,
3006 obj_request->img_offset,
3007 obj_request->length);
3008 result = -ENOMEM;
3009 if (!img_request)
3010 goto out_err;
3012 if (obj_request->type == OBJ_REQUEST_BIO)
3013 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3014 obj_request->bio_list);
3015 else
3016 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3017 obj_request->pages);
3018 if (result)
3019 goto out_err;
3021 img_request->callback = rbd_img_parent_read_callback;
3022 result = rbd_img_request_submit(img_request);
3023 if (result)
3024 goto out_err;
3026 return;
3028 out_err:
3029 rbd_img_request_put(img_request);
3030 obj_request->result = result;
3031 obj_request->xferred = 0;
3032 obj_request_done_set(obj_request);
3035 static const struct rbd_client_id rbd_empty_cid;
3037 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3038 const struct rbd_client_id *rhs)
3040 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3043 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3045 struct rbd_client_id cid;
3047 mutex_lock(&rbd_dev->watch_mutex);
3048 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3049 cid.handle = rbd_dev->watch_cookie;
3050 mutex_unlock(&rbd_dev->watch_mutex);
3055 * lock_rwsem must be held for write
3057 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3058 const struct rbd_client_id *cid)
3060 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3061 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3062 cid->gid, cid->handle);
3063 rbd_dev->owner_cid = *cid; /* struct */
3066 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3068 mutex_lock(&rbd_dev->watch_mutex);
3069 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3070 mutex_unlock(&rbd_dev->watch_mutex);
3074 * lock_rwsem must be held for write
3076 static int rbd_lock(struct rbd_device *rbd_dev)
3078 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3079 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3083 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3084 rbd_dev->lock_cookie[0] != '\0');
3086 format_lock_cookie(rbd_dev, cookie);
3087 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3088 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3089 RBD_LOCK_TAG, "", 0);
3090 if (ret)
3091 return ret;
3093 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3094 strcpy(rbd_dev->lock_cookie, cookie);
3095 rbd_set_owner_cid(rbd_dev, &cid);
3096 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3097 return 0;
3101 * lock_rwsem must be held for write
3103 static void rbd_unlock(struct rbd_device *rbd_dev)
3105 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3108 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3109 rbd_dev->lock_cookie[0] == '\0');
3111 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3112 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3113 if (ret && ret != -ENOENT)
3114 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
3116 /* treat errors as the image is unlocked */
3117 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3118 rbd_dev->lock_cookie[0] = '\0';
3119 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3120 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3123 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3124 enum rbd_notify_op notify_op,
3125 struct page ***preply_pages,
3128 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3129 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3130 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
3134 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3136 /* encode *LockPayload NotifyMessage (op + ClientId) */
3137 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3138 ceph_encode_32(&p, notify_op);
3139 ceph_encode_64(&p, cid.gid);
3140 ceph_encode_64(&p, cid.handle);
3142 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3143 &rbd_dev->header_oloc, buf, buf_size,
3144 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
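/*
 * For reference, the NotifyMessage payload built above has the
 * following on-wire layout (little-endian).  This is a descriptive
 * sketch only, not a structure used by the code:
 *
 *	u8   struct_v;		// 2
 *	u8   struct_compat;	// 1
 *	le32 struct_len;	// 4 + 8 + 8
 *	le32 notify_op;		// enum rbd_notify_op
 *	le64 gid;		// ClientId.gid
 *	le64 handle;		// ClientId.handle
 */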
3147 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3148 enum rbd_notify_op notify_op)
3150 struct page **reply_pages;
3153 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3154 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3157 static void rbd_notify_acquired_lock(struct work_struct *work)
3159 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3160 acquired_lock_work);
3162 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3165 static void rbd_notify_released_lock(struct work_struct *work)
3167 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3168 released_lock_work);
3170 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3173 static int rbd_request_lock(struct rbd_device *rbd_dev)
3175 struct page **reply_pages;
3177 bool lock_owner_responded = false;
3180 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3182 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3183 &reply_pages, &reply_len);
3184 if (ret && ret != -ETIMEDOUT) {
3185 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3186 return ret;
3189 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3190 void *p = page_address(reply_pages[0]);
3191 void *const end = p + reply_len;
3194 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3195 while (n--) {
3199 ceph_decode_need(&p, end, 8 + 8, e_inval);
3200 p += 8 + 8; /* skip gid and cookie */
3202 ceph_decode_32_safe(&p, end, len, e_inval);
3203 if (!len)
3204 continue;
3206 if (lock_owner_responded) {
3208 "duplicate lock owners detected");
3209 ret = -EIO;
3210 goto out;
3213 lock_owner_responded = true;
3214 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3215 &struct_v, &len);
3216 if (ret) {
3217 rbd_warn(rbd_dev,
3218 "failed to decode ResponseMessage: %d",
3219 ret);
3220 goto e_inval;
3223 ret = ceph_decode_32(&p);
3227 if (!lock_owner_responded) {
3228 rbd_warn(rbd_dev, "no lock owners detected");
3229 ret = -ETIMEDOUT;
3232 out:
3233 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3234 return ret;
3236 e_inval:
3237 ret = -EINVAL;
3238 goto out;
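/*
 * The ResponseMessage decoded above is the current lock owner's
 * answer to our REQUEST_LOCK notification: 0 means the owner has
 * released or will release the lock, while a negative value (e.g.
 * -EROFS from a peer mapped with the exclusive option) means it
 * refuses to.  See rbd_handle_request_lock() below for the sending
 * side of this exchange.
 */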
3241 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3243 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3245 cancel_delayed_work(&rbd_dev->lock_dwork);
3246 if (wake_all)
3247 wake_up_all(&rbd_dev->lock_waitq);
3248 else
3249 wake_up(&rbd_dev->lock_waitq);
3252 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3253 struct ceph_locker **lockers, u32 *num_lockers)
3255 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3260 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3262 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3263 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3264 &lock_type, &lock_tag, lockers, num_lockers);
3265 if (ret)
3266 return ret;
3268 if (*num_lockers == 0) {
3269 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3270 goto out;
3273 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3274 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3275 lock_tag);
3276 ret = -EBUSY;
3277 goto out;
3280 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3281 rbd_warn(rbd_dev, "shared lock type detected");
3282 ret = -EBUSY;
3283 goto out;
3286 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3287 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3288 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3289 (*lockers)[0].id.cookie);
3290 ret = -EBUSY;
3291 goto out;
3294 out:
3295 kfree(lock_tag);
3296 return ret;
3299 static int find_watcher(struct rbd_device *rbd_dev,
3300 const struct ceph_locker *locker)
3302 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3303 struct ceph_watch_item *watchers;
3309 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3310 &rbd_dev->header_oloc, &watchers,
3311 &num_watchers);
3312 if (ret)
3313 return ret;
3315 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3316 for (i = 0; i < num_watchers; i++) {
3317 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3318 sizeof(locker->info.addr)) &&
3319 watchers[i].cookie == cookie) {
3320 struct rbd_client_id cid = {
3321 .gid = le64_to_cpu(watchers[i].name.num),
3322 .handle = cookie,
3323 };
3325 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3326 rbd_dev, cid.gid, cid.handle);
3327 rbd_set_owner_cid(rbd_dev, &cid);
3328 ret = 1;
3329 goto out;
3333 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3334 ret = 0;
3336 out:
3337 kfree(watchers);
3338 return ret;
3341 * lock_rwsem must be held for write
3343 static int rbd_try_lock(struct rbd_device *rbd_dev)
3345 struct ceph_client *client = rbd_dev->rbd_client->client;
3346 struct ceph_locker *lockers;
3350 for (;;) {
3351 ret = rbd_lock(rbd_dev);
3352 if (ret != -EBUSY)
3353 return ret;
3355 /* determine if the current lock holder is still alive */
3356 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3357 if (ret)
3358 return ret;
3360 if (num_lockers == 0)
3361 goto again;
3363 ret = find_watcher(rbd_dev, lockers);
3364 if (ret) {
3365 if (ret > 0)
3366 ret = 0; /* have to request lock */
3367 goto out;
3368 }
3370 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3371 ENTITY_NAME(lockers[0].id.name));
3373 ret = ceph_monc_blacklist_add(&client->monc,
3374 &lockers[0].info.addr);
3375 if (ret) {
3376 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3377 ENTITY_NAME(lockers[0].id.name), ret);
3378 goto out;
3379 }
3381 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3382 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3383 lockers[0].id.cookie,
3384 &lockers[0].id.name);
3385 if (ret && ret != -ENOENT)
3386 goto out;
3388 again:
3389 ceph_free_lockers(lockers, num_lockers);
3392 out:
3393 ceph_free_lockers(lockers, num_lockers);
3394 return ret;
3398 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3400 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3403 enum rbd_lock_state lock_state;
3405 down_read(&rbd_dev->lock_rwsem);
3406 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3407 rbd_dev->lock_state);
3408 if (__rbd_is_lock_owner(rbd_dev)) {
3409 lock_state = rbd_dev->lock_state;
3410 up_read(&rbd_dev->lock_rwsem);
3414 up_read(&rbd_dev->lock_rwsem);
3415 down_write(&rbd_dev->lock_rwsem);
3416 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3417 rbd_dev->lock_state);
3418 if (!__rbd_is_lock_owner(rbd_dev)) {
3419 *pret = rbd_try_lock(rbd_dev);
3420 if (*pret)
3421 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3424 lock_state = rbd_dev->lock_state;
3425 up_write(&rbd_dev->lock_rwsem);
3429 static void rbd_acquire_lock(struct work_struct *work)
3431 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3432 struct rbd_device, lock_dwork);
3433 enum rbd_lock_state lock_state;
3436 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3437 again:
3438 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3439 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3440 if (lock_state == RBD_LOCK_STATE_LOCKED)
3441 wake_requests(rbd_dev, true);
3442 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3443 rbd_dev, lock_state, ret);
3444 return;
3447 ret = rbd_request_lock(rbd_dev);
3448 if (ret == -ETIMEDOUT) {
3449 goto again; /* treat this as a dead client */
3450 } else if (ret == -EROFS) {
3451 rbd_warn(rbd_dev, "peer will not release lock");
3453 * If this is rbd_add_acquire_lock(), we want to fail
3454 * immediately -- reuse BLACKLISTED flag. Otherwise we
3455 * want to block.
3457 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3458 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3459 /* wake "rbd map --exclusive" process */
3460 wake_requests(rbd_dev, false);
3462 } else if (ret < 0) {
3463 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3464 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3465 RBD_RETRY_DELAY);
3466 } else {
3468 * lock owner acked, but resend if we don't see them
3469 * release the lock
3471 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3472 rbd_dev);
3473 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3474 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3479 * lock_rwsem must be held for write
3481 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3483 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3484 rbd_dev->lock_state);
3485 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3486 return false;
3488 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3489 downgrade_write(&rbd_dev->lock_rwsem);
3491 * Ensure that all in-flight IO is flushed.
3493 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3494 * may be shared with other devices.
3496 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3497 up_read(&rbd_dev->lock_rwsem);
3499 down_write(&rbd_dev->lock_rwsem);
3500 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3501 rbd_dev->lock_state);
3502 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3503 return false;
3505 rbd_unlock(rbd_dev);
3507 * Give others a chance to grab the lock - we would re-acquire
3508 * almost immediately if we got new IO during ceph_osdc_sync()
3509 * otherwise. We need to ack our own notifications, so this
3510 * lock_dwork will be requeued from rbd_wait_state_locked()
3511 * after wake_requests() in rbd_handle_released_lock().
3513 cancel_delayed_work(&rbd_dev->lock_dwork);
3514 return true;
3517 static void rbd_release_lock_work(struct work_struct *work)
3519 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3520 unlock_work);
3522 down_write(&rbd_dev->lock_rwsem);
3523 rbd_release_lock(rbd_dev);
3524 up_write(&rbd_dev->lock_rwsem);
3527 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3530 struct rbd_client_id cid = { 0 };
3532 if (struct_v >= 2) {
3533 cid.gid = ceph_decode_64(p);
3534 cid.handle = ceph_decode_64(p);
3537 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3539 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3540 down_write(&rbd_dev->lock_rwsem);
3541 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3543 * we already know that the remote client is
3544 * the owner
3546 up_write(&rbd_dev->lock_rwsem);
3547 return;
3550 rbd_set_owner_cid(rbd_dev, &cid);
3551 downgrade_write(&rbd_dev->lock_rwsem);
3552 } else {
3553 down_read(&rbd_dev->lock_rwsem);
3556 if (!__rbd_is_lock_owner(rbd_dev))
3557 wake_requests(rbd_dev, false);
3558 up_read(&rbd_dev->lock_rwsem);
3561 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3564 struct rbd_client_id cid = { 0 };
3566 if (struct_v >= 2) {
3567 cid.gid = ceph_decode_64(p);
3568 cid.handle = ceph_decode_64(p);
3571 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3573 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3574 down_write(&rbd_dev->lock_rwsem);
3575 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3576 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3577 __func__, rbd_dev, cid.gid, cid.handle,
3578 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3579 up_write(&rbd_dev->lock_rwsem);
3583 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3584 downgrade_write(&rbd_dev->lock_rwsem);
3585 } else {
3586 down_read(&rbd_dev->lock_rwsem);
3589 if (!__rbd_is_lock_owner(rbd_dev))
3590 wake_requests(rbd_dev, false);
3591 up_read(&rbd_dev->lock_rwsem);
3595 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3596 * ResponseMessage is needed.
3598 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3601 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3602 struct rbd_client_id cid = { 0 };
3605 if (struct_v >= 2) {
3606 cid.gid = ceph_decode_64(p);
3607 cid.handle = ceph_decode_64(p);
3610 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3612 if (rbd_cid_equal(&cid, &my_cid))
3613 return result;
3615 down_read(&rbd_dev->lock_rwsem);
3616 if (__rbd_is_lock_owner(rbd_dev)) {
3617 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3618 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3619 goto out_unlock;
3622 * encode ResponseMessage(0) so the peer can detect
3623 * a missing owner
3625 result = 0;
3627 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3628 if (!rbd_dev->opts->exclusive) {
3629 dout("%s rbd_dev %p queueing unlock_work\n",
3631 queue_work(rbd_dev->task_wq,
3632 &rbd_dev->unlock_work);
3633 } else {
3634 /* refuse to release the lock */
3635 result = -EROFS;
3640 out_unlock:
3641 up_read(&rbd_dev->lock_rwsem);
3642 return result;
3645 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3646 u64 notify_id, u64 cookie, s32 *result)
3648 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3649 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
3656 /* encode ResponseMessage */
3657 ceph_start_encoding(&p, 1, 1,
3658 buf_size - CEPH_ENCODING_START_BLK_LEN);
3659 ceph_encode_32(&p, *result);
3664 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3665 &rbd_dev->header_oloc, notify_id, cookie,
3666 buf, buf_size);
3667 if (ret)
3668 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3671 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3674 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3675 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3678 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3679 u64 notify_id, u64 cookie, s32 result)
3681 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3682 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3685 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3686 u64 notifier_id, void *data, size_t data_len)
3688 struct rbd_device *rbd_dev = arg;
3689 void *p = data;
3690 void *const end = p + data_len;
3696 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3697 __func__, rbd_dev, cookie, notify_id, data_len);
3698 if (data_len) {
3699 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3700 &struct_v, &len);
3701 if (ret) {
3702 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3703 ret);
3704 return;
3707 notify_op = ceph_decode_32(&p);
3708 } else {
3709 /* legacy notification for header updates */
3710 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3714 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3715 switch (notify_op) {
3716 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3717 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3718 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3719 break;
3720 case RBD_NOTIFY_OP_RELEASED_LOCK:
3721 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3722 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3723 break;
3724 case RBD_NOTIFY_OP_REQUEST_LOCK:
3725 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3726 if (ret <= 0)
3727 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3728 cookie, ret);
3729 else
3730 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3731 break;
3732 case RBD_NOTIFY_OP_HEADER_UPDATE:
3733 ret = rbd_dev_refresh(rbd_dev);
3734 if (ret)
3735 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3737 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3738 break;
3739 default:
3740 if (rbd_is_lock_owner(rbd_dev))
3741 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3742 cookie, -EOPNOTSUPP);
3743 else
3744 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
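/*
 * Unknown notify ops are rejected with -EOPNOTSUPP only when we own
 * the lock; a non-owner simply acks them so that newer clients
 * speaking a richer protocol are not failed unnecessarily.
 */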
3749 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3751 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3753 struct rbd_device *rbd_dev = arg;
3755 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3757 down_write(&rbd_dev->lock_rwsem);
3758 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3759 up_write(&rbd_dev->lock_rwsem);
3761 mutex_lock(&rbd_dev->watch_mutex);
3762 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3763 __rbd_unregister_watch(rbd_dev);
3764 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3766 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3768 mutex_unlock(&rbd_dev->watch_mutex);
3772 * watch_mutex must be locked
3774 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3776 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3777 struct ceph_osd_linger_request *handle;
3779 rbd_assert(!rbd_dev->watch_handle);
3780 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3782 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3783 &rbd_dev->header_oloc, rbd_watch_cb,
3784 rbd_watch_errcb, rbd_dev);
3785 if (IS_ERR(handle))
3786 return PTR_ERR(handle);
3788 rbd_dev->watch_handle = handle;
3789 return 0;
3793 * watch_mutex must be locked
3795 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3797 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3800 rbd_assert(rbd_dev->watch_handle);
3801 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3803 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3805 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3807 rbd_dev->watch_handle = NULL;
3810 static int rbd_register_watch(struct rbd_device *rbd_dev)
3814 mutex_lock(&rbd_dev->watch_mutex);
3815 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3816 ret = __rbd_register_watch(rbd_dev);
3817 if (ret)
3818 goto out;
3820 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3821 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3823 out:
3824 mutex_unlock(&rbd_dev->watch_mutex);
3825 return ret;
3828 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3830 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3832 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3833 cancel_work_sync(&rbd_dev->acquired_lock_work);
3834 cancel_work_sync(&rbd_dev->released_lock_work);
3835 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3836 cancel_work_sync(&rbd_dev->unlock_work);
3839 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3841 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3842 cancel_tasks_sync(rbd_dev);
3844 mutex_lock(&rbd_dev->watch_mutex);
3845 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3846 __rbd_unregister_watch(rbd_dev);
3847 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3848 mutex_unlock(&rbd_dev->watch_mutex);
3850 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3854 * lock_rwsem must be held for write
3856 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3858 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3862 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3864 format_lock_cookie(rbd_dev, cookie);
3865 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3866 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3867 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3868 RBD_LOCK_TAG, cookie);
3869 if (ret) {
3870 if (ret != -EOPNOTSUPP)
3871 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3872 ret);
3875 * Lock cookie cannot be updated on older OSDs, so do
3876 * a manual release and queue an acquire.
3878 if (rbd_release_lock(rbd_dev))
3879 queue_delayed_work(rbd_dev->task_wq,
3880 &rbd_dev->lock_dwork, 0);
3881 } else {
3882 strcpy(rbd_dev->lock_cookie, cookie);
3883 }
3886 static void rbd_reregister_watch(struct work_struct *work)
3888 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3889 struct rbd_device, watch_dwork);
3892 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3894 mutex_lock(&rbd_dev->watch_mutex);
3895 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3896 mutex_unlock(&rbd_dev->watch_mutex);
3900 ret = __rbd_register_watch(rbd_dev);
3901 if (ret) {
3902 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3903 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3904 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3905 wake_requests(rbd_dev, true);
3906 } else {
3907 queue_delayed_work(rbd_dev->task_wq,
3908 &rbd_dev->watch_dwork,
3909 RBD_RETRY_DELAY);
3911 mutex_unlock(&rbd_dev->watch_mutex);
3912 return;
3915 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3916 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3917 mutex_unlock(&rbd_dev->watch_mutex);
3919 down_write(&rbd_dev->lock_rwsem);
3920 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3921 rbd_reacquire_lock(rbd_dev);
3922 up_write(&rbd_dev->lock_rwsem);
3924 ret = rbd_dev_refresh(rbd_dev);
3925 if (ret)
3926 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3930 * Synchronous osd object method call. Returns the number of bytes
3931 * returned in the outbound buffer, or a negative error code.
3933 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3934 struct ceph_object_id *oid,
3935 struct ceph_object_locator *oloc,
3936 const char *method_name,
3937 const void *outbound,
3938 size_t outbound_size,
3940 size_t inbound_size)
3942 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3943 struct page *req_page = NULL;
3944 struct page *reply_page;
3948 * Method calls are ultimately read operations. The result
3949 * should be placed into the inbound buffer provided. They
3950 * also supply outbound data--parameters for the object
3951 * method. Currently if this is present it will be a
3952 * snapshot id.
3954 if (outbound) {
3955 if (outbound_size > PAGE_SIZE)
3956 return -E2BIG;
3958 req_page = alloc_page(GFP_KERNEL);
3959 if (!req_page)
3960 return -ENOMEM;
3962 memcpy(page_address(req_page), outbound, outbound_size);
3965 reply_page = alloc_page(GFP_KERNEL);
3966 if (!reply_page) {
3967 if (req_page)
3968 __free_page(req_page);
3969 return -ENOMEM;
3972 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3973 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3974 reply_page, &inbound_size);
3975 if (!ret) {
3976 memcpy(inbound, page_address(reply_page), inbound_size);
3977 ret = inbound_size;
3978 }
3980 if (req_page)
3981 __free_page(req_page);
3982 __free_page(reply_page);
3983 return ret;
3987 * lock_rwsem must be held for read
3989 static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
3991 DEFINE_WAIT(wait);
3993 do {
3995 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3996 * and cancel_delayed_work() in wake_requests().
3998 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3999 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4000 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
4001 TASK_UNINTERRUPTIBLE);
4002 up_read(&rbd_dev->lock_rwsem);
4003 schedule();
4004 down_read(&rbd_dev->lock_rwsem);
4005 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
4006 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
4008 finish_wait(&rbd_dev->lock_waitq, &wait);
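/*
 * A minimal sketch of the wait protocol above, from the point of
 * view of an I/O thread (assumes lock_rwsem held for read, as
 * documented):
 *
 *	do {
 *		queue lock_dwork;	// kicks rbd_acquire_lock()
 *		sleep on lock_waitq;	// woken by wake_requests()
 *	} while (not locked and not blacklisted);
 *
 * rbd_acquire_lock() either takes the lock directly via
 * rbd_try_lock() or notifies the current owner and requeues itself
 * until a RELEASED_LOCK notification lets it through.
 */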
4011 static void rbd_queue_workfn(struct work_struct *work)
4013 struct request *rq = blk_mq_rq_from_pdu(work);
4014 struct rbd_device *rbd_dev = rq->q->queuedata;
4015 struct rbd_img_request *img_request;
4016 struct ceph_snap_context *snapc = NULL;
4017 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4018 u64 length = blk_rq_bytes(rq);
4019 enum obj_operation_type op_type;
4021 bool must_be_locked;
4024 switch (req_op(rq)) {
4025 case REQ_OP_DISCARD:
4026 op_type = OBJ_OP_DISCARD;
4027 break;
4028 case REQ_OP_WRITE:
4029 op_type = OBJ_OP_WRITE;
4030 break;
4031 case REQ_OP_READ:
4032 op_type = OBJ_OP_READ;
4033 break;
4034 default:
4035 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
4036 result = -EIO;
4037 goto err;
4040 /* Ignore/skip any zero-length requests */
4042 if (!length) {
4043 dout("%s: zero-length request\n", __func__);
4044 result = 0;
4045 goto err_rq;
4046 }
4048 /* Only reads are allowed to a read-only device */
4050 if (op_type != OBJ_OP_READ) {
4051 if (rbd_dev->mapping.read_only) {
4052 result = -EROFS;
4053 goto err_rq;
4054 }
4055 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
4059 * Quit early if the mapped snapshot no longer exists. It's
4060 * still possible the snapshot will have disappeared by the
4061 * time our request arrives at the osd, but there's no sense in
4062 * sending it if we already know.
4064 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4065 dout("request for non-existent snapshot");
4066 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4067 result = -ENXIO;
4068 goto err_rq;
4071 if (offset && length > U64_MAX - offset + 1) {
4072 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4073 length);
4074 result = -EINVAL;
4075 goto err_rq; /* Shouldn't happen */
4078 blk_mq_start_request(rq);
4080 down_read(&rbd_dev->header_rwsem);
4081 mapping_size = rbd_dev->mapping.size;
4082 if (op_type != OBJ_OP_READ) {
4083 snapc = rbd_dev->header.snapc;
4084 ceph_get_snap_context(snapc);
4086 up_read(&rbd_dev->header_rwsem);
4088 if (offset + length > mapping_size) {
4089 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4090 length, mapping_size);
4091 result = -EIO;
4092 goto err_rq;
4095 must_be_locked =
4096 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
4097 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
4098 if (must_be_locked) {
4099 down_read(&rbd_dev->lock_rwsem);
4100 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
4101 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
4102 if (rbd_dev->opts->exclusive) {
4103 rbd_warn(rbd_dev, "exclusive lock required");
4104 result = -EROFS;
4105 goto err_unlock;
4107 rbd_wait_state_locked(rbd_dev);
4109 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
4110 result = -EBLACKLISTED;
4111 goto err_unlock;
4115 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4116 snapc);
4117 if (!img_request) {
4118 result = -ENOMEM;
4119 goto err_unlock;
4120 }
4121 img_request->rq = rq;
4122 snapc = NULL; /* img_request consumes a ref */
4124 if (op_type == OBJ_OP_DISCARD)
4125 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
4126 NULL);
4127 else
4128 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
4129 rq->bio);
4130 if (result)
4131 goto err_img_request;
4133 result = rbd_img_request_submit(img_request);
4134 if (result)
4135 goto err_img_request;
4137 if (must_be_locked)
4138 up_read(&rbd_dev->lock_rwsem);
4139 return;
4141 err_img_request:
4142 rbd_img_request_put(img_request);
4143 err_unlock:
4144 if (must_be_locked)
4145 up_read(&rbd_dev->lock_rwsem);
4146 err_rq:
4147 if (result)
4148 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4149 obj_op_name(op_type), length, offset, result);
4150 ceph_put_snap_context(snapc);
4151 err:
4152 blk_mq_end_request(rq, result);
4155 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4156 const struct blk_mq_queue_data *bd)
4158 struct request *rq = bd->rq;
4159 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4161 queue_work(rbd_wq, work);
4162 return BLK_MQ_RQ_QUEUE_OK;
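/*
 * Note the division of labor in the blk-mq path: rbd_init_request()
 * below primes each request's driver payload (cmd_size is
 * sizeof(struct work_struct)) with rbd_queue_workfn(), so queue_rq
 * only has to bounce the request to the rbd_wq workqueue.  All
 * blocking work - snap context references, exclusive lock waits,
 * osd submission - then runs in process context in rbd_queue_workfn().
 */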
4165 static void rbd_free_disk(struct rbd_device *rbd_dev)
4167 blk_cleanup_queue(rbd_dev->disk->queue);
4168 blk_mq_free_tag_set(&rbd_dev->tag_set);
4169 put_disk(rbd_dev->disk);
4170 rbd_dev->disk = NULL;
4173 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4174 struct ceph_object_id *oid,
4175 struct ceph_object_locator *oloc,
4176 void *buf, int buf_len)
4179 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4180 struct ceph_osd_request *req;
4181 struct page **pages;
4182 int num_pages = calc_pages_for(0, buf_len);
4185 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4186 if (!req)
4187 return -ENOMEM;
4189 ceph_oid_copy(&req->r_base_oid, oid);
4190 ceph_oloc_copy(&req->r_base_oloc, oloc);
4191 req->r_flags = CEPH_OSD_FLAG_READ;
4193 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4194 if (ret)
4195 goto out_req;
4197 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4198 if (IS_ERR(pages)) {
4199 ret = PTR_ERR(pages);
4200 goto out_req;
4203 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4204 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4207 ceph_osdc_start_request(osdc, req, false);
4208 ret = ceph_osdc_wait_request(osdc, req);
4209 if (ret >= 0)
4210 ceph_copy_from_page_vector(pages, buf, 0, ret);
4212 out_req:
4213 ceph_osdc_put_request(req);
4214 return ret;
4218 * Read the complete header for the given rbd device. On successful
4219 * return, the rbd_dev->header field will contain up-to-date
4220 * information about the image.
4222 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4224 struct rbd_image_header_ondisk *ondisk = NULL;
4231 * The complete header will include an array of its 64-bit
4232 * snapshot ids, followed by the names of those snapshots as
4233 * a contiguous block of NUL-terminated strings. Note that
4234 * the number of snapshots could change by the time we read
4235 * it in, in which case we re-read it.
4242 size = sizeof (*ondisk);
4243 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4245 ondisk = kmalloc(size, GFP_KERNEL);
4246 if (!ondisk)
4247 return -ENOMEM;
4249 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4250 &rbd_dev->header_oloc, ondisk, size);
4251 if (ret < 0)
4252 goto out;
4253 if ((size_t)ret < size) {
4254 ret = -ENXIO;
4255 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4256 size, ret);
4257 goto out;
4259 if (!rbd_dev_ondisk_valid(ondisk)) {
4260 ret = -ENXIO;
4261 rbd_warn(rbd_dev, "invalid header");
4262 goto out;
4265 names_size = le64_to_cpu(ondisk->snap_names_len);
4266 want_count = snap_count;
4267 snap_count = le32_to_cpu(ondisk->snap_count);
4268 } while (snap_count != want_count);
4270 ret = rbd_header_from_disk(rbd_dev, ondisk);
4271 out:
4272 kfree(ondisk);
4274 return ret;
4278 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4279 * has disappeared from the (just updated) snapshot context.
4281 static void rbd_exists_validate(struct rbd_device *rbd_dev)
4285 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
4286 return;
4288 snap_id = rbd_dev->spec->snap_id;
4289 if (snap_id == CEPH_NOSNAP)
4290 return;
4292 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4293 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4296 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4301 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4302 * try to update its size. If REMOVING is set, updating size
4303 * is just useless work since the device can't be opened.
4305 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4306 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4307 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4308 dout("setting size to %llu sectors", (unsigned long long)size);
4309 set_capacity(rbd_dev->disk, size);
4310 revalidate_disk(rbd_dev->disk);
4314 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4319 down_write(&rbd_dev->header_rwsem);
4320 mapping_size = rbd_dev->mapping.size;
4322 ret = rbd_dev_header_info(rbd_dev);
4323 if (ret)
4324 goto out;
4327 * If there is a parent, see if it has disappeared due to the
4328 * mapped image getting flattened.
4330 if (rbd_dev->parent) {
4331 ret = rbd_dev_v2_parent_info(rbd_dev);
4332 if (ret)
4333 goto out;
4336 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4337 rbd_dev->mapping.size = rbd_dev->header.image_size;
4338 } else {
4339 /* validate mapped snapshot's EXISTS flag */
4340 rbd_exists_validate(rbd_dev);
4343 out:
4344 up_write(&rbd_dev->header_rwsem);
4345 if (!ret && mapping_size != rbd_dev->mapping.size)
4346 rbd_dev_update_size(rbd_dev);
4348 return ret;
4351 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
4352 unsigned int hctx_idx, unsigned int numa_node)
4354 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4356 INIT_WORK(work, rbd_queue_workfn);
4357 return 0;
4360 static const struct blk_mq_ops rbd_mq_ops = {
4361 .queue_rq = rbd_queue_rq,
4362 .init_request = rbd_init_request,
4365 static int rbd_init_disk(struct rbd_device *rbd_dev)
4367 struct gendisk *disk;
4368 struct request_queue *q;
4372 /* create gendisk info */
4373 disk = alloc_disk(single_major ?
4374 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4375 RBD_MINORS_PER_MAJOR);
4376 if (!disk)
4377 return -ENOMEM;
4379 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4380 rbd_dev->dev_id);
4381 disk->major = rbd_dev->major;
4382 disk->first_minor = rbd_dev->minor;
4383 if (single_major)
4384 disk->flags |= GENHD_FL_EXT_DEVT;
4385 disk->fops = &rbd_bd_ops;
4386 disk->private_data = rbd_dev;
4388 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4389 rbd_dev->tag_set.ops = &rbd_mq_ops;
4390 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4391 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4392 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4393 rbd_dev->tag_set.nr_hw_queues = 1;
4394 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4396 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4397 if (err)
4398 goto out_disk;
4400 q = blk_mq_init_queue(&rbd_dev->tag_set);
4401 if (IS_ERR(q)) {
4402 err = PTR_ERR(q);
4403 goto out_tag_set;
4404 }
4406 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4407 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4409 /* set io sizes to object size */
4410 segment_size = rbd_obj_bytes(&rbd_dev->header);
4411 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4412 q->limits.max_sectors = queue_max_hw_sectors(q);
4413 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
4414 blk_queue_max_segment_size(q, segment_size);
4415 blk_queue_io_min(q, segment_size);
4416 blk_queue_io_opt(q, segment_size);
4418 /* enable the discard support */
4419 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4420 q->limits.discard_granularity = segment_size;
4421 q->limits.discard_alignment = segment_size;
4422 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
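/*
 * All of the queue limits above are derived from the object size:
 * requests are capped at one object's worth of data, and io_min/
 * io_opt steer callers toward object-aligned I/O, so most requests
 * map onto a single object request in rbd_img_request_fill().
 */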
4424 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4425 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
4428 * disk_release() expects a queue ref from add_disk() and will
4429 * put it. Hold an extra ref until add_disk() is called.
4431 WARN_ON(!blk_get_queue(q));
4433 q->queuedata = rbd_dev;
4435 rbd_dev->disk = disk;
4437 return 0;
4438 out_tag_set:
4439 blk_mq_free_tag_set(&rbd_dev->tag_set);
4440 out_disk:
4441 put_disk(disk);
4442 return err;
4449 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4451 return container_of(dev, struct rbd_device, dev);
4454 static ssize_t rbd_size_show(struct device *dev,
4455 struct device_attribute *attr, char *buf)
4457 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4459 return sprintf(buf, "%llu\n",
4460 (unsigned long long)rbd_dev->mapping.size);
4464 * Note this shows the features for whatever's mapped, which is not
4465 * necessarily the base image.
4467 static ssize_t rbd_features_show(struct device *dev,
4468 struct device_attribute *attr, char *buf)
4470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4472 return sprintf(buf, "0x%016llx\n",
4473 (unsigned long long)rbd_dev->mapping.features);
4476 static ssize_t rbd_major_show(struct device *dev,
4477 struct device_attribute *attr, char *buf)
4479 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4481 if (rbd_dev->major)
4482 return sprintf(buf, "%d\n", rbd_dev->major);
4484 return sprintf(buf, "(none)\n");
4487 static ssize_t rbd_minor_show(struct device *dev,
4488 struct device_attribute *attr, char *buf)
4490 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4492 return sprintf(buf, "%d\n", rbd_dev->minor);
4495 static ssize_t rbd_client_addr_show(struct device *dev,
4496 struct device_attribute *attr, char *buf)
4498 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4499 struct ceph_entity_addr *client_addr =
4500 ceph_client_addr(rbd_dev->rbd_client->client);
4502 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4503 le32_to_cpu(client_addr->nonce));
4506 static ssize_t rbd_client_id_show(struct device *dev,
4507 struct device_attribute *attr, char *buf)
4509 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4511 return sprintf(buf, "client%lld\n",
4512 ceph_client_gid(rbd_dev->rbd_client->client));
4515 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4516 struct device_attribute *attr, char *buf)
4518 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4520 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4523 static ssize_t rbd_config_info_show(struct device *dev,
4524 struct device_attribute *attr, char *buf)
4526 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4528 return sprintf(buf, "%s\n", rbd_dev->config_info);
4531 static ssize_t rbd_pool_show(struct device *dev,
4532 struct device_attribute *attr, char *buf)
4534 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4536 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4539 static ssize_t rbd_pool_id_show(struct device *dev,
4540 struct device_attribute *attr, char *buf)
4542 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4544 return sprintf(buf, "%llu\n",
4545 (unsigned long long) rbd_dev->spec->pool_id);
4548 static ssize_t rbd_name_show(struct device *dev,
4549 struct device_attribute *attr, char *buf)
4551 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4553 if (rbd_dev->spec->image_name)
4554 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4556 return sprintf(buf, "(unknown)\n");
4559 static ssize_t rbd_image_id_show(struct device *dev,
4560 struct device_attribute *attr, char *buf)
4562 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4564 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4568 * Shows the name of the currently-mapped snapshot (or
4569 * RBD_SNAP_HEAD_NAME for the base image).
4571 static ssize_t rbd_snap_show(struct device *dev,
4572 struct device_attribute *attr,
4575 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4577 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4580 static ssize_t rbd_snap_id_show(struct device *dev,
4581 struct device_attribute *attr, char *buf)
4583 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4585 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4589 * For a v2 image, shows the chain of parent images, separated by empty
4590 lines. For v1 images or if there is no parent, shows "(no parent image)".
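For illustration (ids, names and overlap below are made up), a
one-entry chain reads back as:

  pool_id 2
  pool_name rbd
  image_id 1a2b3c4d5e6f
  image_name parent-image
  snap_id 4
  snap_name base-snap
  overlap 1073741824

with additional entries, if any, separated by empty lines.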
4593 static ssize_t rbd_parent_show(struct device *dev,
4594 struct device_attribute *attr,
4597 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4600 if (!rbd_dev->parent)
4601 return sprintf(buf, "(no parent image)\n");
4603 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4604 struct rbd_spec *spec = rbd_dev->parent_spec;
4606 count += sprintf(&buf[count], "%s"
4607 "pool_id %llu\npool_name %s\n"
4608 "image_id %s\nimage_name %s\n"
4609 "snap_id %llu\nsnap_name %s\n"
4611 !count ? "" : "\n", /* first? */
4612 spec->pool_id, spec->pool_name,
4613 spec->image_id, spec->image_name ?: "(unknown)",
4614 spec->snap_id, spec->snap_name,
4615 rbd_dev->parent_overlap);
4621 static ssize_t rbd_image_refresh(struct device *dev,
4622 struct device_attribute *attr,
4626 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4629 ret = rbd_dev_refresh(rbd_dev);
4636 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
4637 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
4638 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
4639 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
4640 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
4641 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
4642 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
4643 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
4644 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
4645 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
4646 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
4647 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
4648 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4649 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
4650 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
4651 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
4653 static struct attribute *rbd_attrs[] = {
4654 &dev_attr_size.attr,
4655 &dev_attr_features.attr,
4656 &dev_attr_major.attr,
4657 &dev_attr_minor.attr,
4658 &dev_attr_client_addr.attr,
4659 &dev_attr_client_id.attr,
4660 &dev_attr_cluster_fsid.attr,
4661 &dev_attr_config_info.attr,
4662 &dev_attr_pool.attr,
4663 &dev_attr_pool_id.attr,
4664 &dev_attr_name.attr,
4665 &dev_attr_image_id.attr,
4666 &dev_attr_current_snap.attr,
4667 &dev_attr_snap_id.attr,
4668 &dev_attr_parent.attr,
4669 &dev_attr_refresh.attr,
4673 static struct attribute_group rbd_attr_group = {
4677 static const struct attribute_group *rbd_attr_groups[] = {
4682 static void rbd_dev_release(struct device *dev);
4684 static const struct device_type rbd_device_type = {
4686 .groups = rbd_attr_groups,
4687 .release = rbd_dev_release,
4690 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4692 kref_get(&spec->kref);
4697 static void rbd_spec_free(struct kref *kref);
4698 static void rbd_spec_put(struct rbd_spec *spec)
4701 kref_put(&spec->kref, rbd_spec_free);
4704 static struct rbd_spec *rbd_spec_alloc(void)
4706 struct rbd_spec *spec;
4708 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4712 spec->pool_id = CEPH_NOPOOL;
4713 spec->snap_id = CEPH_NOSNAP;
4714 kref_init(&spec->kref);
4719 static void rbd_spec_free(struct kref *kref)
4721 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4723 kfree(spec->pool_name);
4724 kfree(spec->image_id);
4725 kfree(spec->image_name);
4726 kfree(spec->snap_name);
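A minimal sketch of the refcounting implemented above; the function
below is illustrative only (hence #if 0) and not part of the driver.

#if 0
static void rbd_spec_lifecycle_example(void)
{
	struct rbd_spec *spec = rbd_spec_alloc();

	if (!spec)
		return;
	/* ... fill in pool/image/snapshot fields ... */
	rbd_spec_get(spec);	/* extra ref, e.g. shared with a child */
	rbd_spec_put(spec);	/* drop the extra ref */
	rbd_spec_put(spec);	/* last put frees via rbd_spec_free() */
}
#endif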
4730 static void rbd_dev_free(struct rbd_device *rbd_dev)
4732 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4733 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4735 ceph_oid_destroy(&rbd_dev->header_oid);
4736 ceph_oloc_destroy(&rbd_dev->header_oloc);
4737 kfree(rbd_dev->config_info);
4739 rbd_put_client(rbd_dev->rbd_client);
4740 rbd_spec_put(rbd_dev->spec);
4741 kfree(rbd_dev->opts);
4745 static void rbd_dev_release(struct device *dev)
4747 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4748 bool need_put = !!rbd_dev->opts;
4751 destroy_workqueue(rbd_dev->task_wq);
4752 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4755 rbd_dev_free(rbd_dev);
4758 This is racy, but way better than dropping the module reference outside of
4759 * the release callback. The race window is pretty small, so
4760 * doing something similar to dm (dm-builtin.c) is overkill.
4763 module_put(THIS_MODULE);
4766 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4767 struct rbd_spec *spec)
4769 struct rbd_device *rbd_dev;
4771 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4775 spin_lock_init(&rbd_dev->lock);
4776 INIT_LIST_HEAD(&rbd_dev->node);
4777 init_rwsem(&rbd_dev->header_rwsem);
4779 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4780 ceph_oid_init(&rbd_dev->header_oid);
4781 rbd_dev->header_oloc.pool = spec->pool_id;
4783 mutex_init(&rbd_dev->watch_mutex);
4784 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4785 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4787 init_rwsem(&rbd_dev->lock_rwsem);
4788 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4789 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4790 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4791 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4792 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4793 init_waitqueue_head(&rbd_dev->lock_waitq);
4795 rbd_dev->dev.bus = &rbd_bus_type;
4796 rbd_dev->dev.type = &rbd_device_type;
4797 rbd_dev->dev.parent = &rbd_root_dev;
4798 device_initialize(&rbd_dev->dev);
4800 rbd_dev->rbd_client = rbdc;
4801 rbd_dev->spec = spec;
4807 Create a mapping rbd_dev, i.e. one representing a user-requested mapping
(parent images are created with __rbd_dev_create() alone).
4809 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4810 struct rbd_spec *spec,
4811 struct rbd_options *opts)
4813 struct rbd_device *rbd_dev;
4815 rbd_dev = __rbd_dev_create(rbdc, spec);
4819 rbd_dev->opts = opts;
4821 /* get an id and fill in device name */
4822 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4823 minor_to_rbd_dev_id(1 << MINORBITS),
4825 if (rbd_dev->dev_id < 0)
4828 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4829 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4831 if (!rbd_dev->task_wq)
4834 /* we have a ref from do_rbd_add() */
4835 __module_get(THIS_MODULE);
4837 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4841 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4843 rbd_dev_free(rbd_dev);
4847 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4850 put_device(&rbd_dev->dev);
4854 * Get the size and object order for an image snapshot, or if
4855 snap_id is CEPH_NOSNAP, get this information for the base image.
4858 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4859 u8 *order, u64 *snap_size)
4861 __le64 snapid = cpu_to_le64(snap_id);
4866 } __attribute__ ((packed)) size_buf = { 0 };
4868 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4869 &rbd_dev->header_oloc, "get_size",
4870 &snapid, sizeof(snapid),
4871 &size_buf, sizeof(size_buf));
4872 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4875 if (ret < sizeof (size_buf))
4879 *order = size_buf.order;
4880 dout(" order %u", (unsigned int)*order);
4882 *snap_size = le64_to_cpu(size_buf.size);
4884 dout(" snap_id 0x%016llx snap_size = %llu\n",
4885 (unsigned long long)snap_id,
4886 (unsigned long long)*snap_size);
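For reference, the "get_size" reply decoded above is just the packed
pair read into size_buf:

	u8     order;	/* object size is 1 << order bytes */
	__le64 size;	/* image/snapshot size in bytes */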
4891 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4893 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4894 &rbd_dev->header.obj_order,
4895 &rbd_dev->header.image_size);
4898 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4904 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4908 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4909 &rbd_dev->header_oloc, "get_object_prefix",
4910 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4911 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4916 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4917 p + ret, NULL, GFP_NOIO);
4920 if (IS_ERR(rbd_dev->header.object_prefix)) {
4921 ret = PTR_ERR(rbd_dev->header.object_prefix);
4922 rbd_dev->header.object_prefix = NULL;
4924 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4932 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4935 __le64 snapid = cpu_to_le64(snap_id);
4939 } __attribute__ ((packed)) features_buf = { 0 };
4943 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4944 &rbd_dev->header_oloc, "get_features",
4945 &snapid, sizeof(snapid),
4946 &features_buf, sizeof(features_buf));
4947 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4950 if (ret < sizeof (features_buf))
4953 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4955 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4960 *snap_features = le64_to_cpu(features_buf.features);
4962 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4963 (unsigned long long)snap_id,
4964 (unsigned long long)*snap_features,
4965 (unsigned long long)le64_to_cpu(features_buf.incompat));
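Likewise, the "get_features" reply is a packed pair of __le64 values,
features followed by incompat; any bit set in incompat but absent
from RBD_FEATURES_SUPPORTED causes the probe to fail.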
4970 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4972 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4973 &rbd_dev->header.features);
4976 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4978 struct rbd_spec *parent_spec;
4980 void *reply_buf = NULL;
4990 parent_spec = rbd_spec_alloc();
4994 size = sizeof (__le64) + /* pool_id */
4995 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4996 sizeof (__le64) + /* snap_id */
4997 sizeof (__le64); /* overlap */
4998 reply_buf = kmalloc(size, GFP_KERNEL);
5004 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5005 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5006 &rbd_dev->header_oloc, "get_parent",
5007 &snapid, sizeof(snapid), reply_buf, size);
5008 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5013 end = reply_buf + ret;
5015 ceph_decode_64_safe(&p, end, pool_id, out_err);
5016 if (pool_id == CEPH_NOPOOL) {
5018 Either the parent never existed, or we have a
5019 * record of it but the image got flattened so it no
5020 * longer has a parent. When the parent of a
5021 * layered image disappears we immediately set the
5022 * overlap to 0. The effect of this is that all new
5023 * requests will be treated as if the image had no
5026 if (rbd_dev->parent_overlap) {
5027 rbd_dev->parent_overlap = 0;
5028 rbd_dev_parent_put(rbd_dev);
5029 pr_info("%s: clone image has been flattened\n",
5030 rbd_dev->disk->disk_name);
5033 goto out; /* No parent? No problem. */
5036 /* The ceph file layout needs to fit pool id in 32 bits */
5039 if (pool_id > (u64)U32_MAX) {
5040 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5041 (unsigned long long)pool_id, U32_MAX);
5045 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5046 if (IS_ERR(image_id)) {
5047 ret = PTR_ERR(image_id);
5050 ceph_decode_64_safe(&p, end, snap_id, out_err);
5051 ceph_decode_64_safe(&p, end, overlap, out_err);
5054 * The parent won't change (except when the clone is
5055 flattened, which was handled above). So we only need to
5056 record the parent spec if we have not already done so.
5058 if (!rbd_dev->parent_spec) {
5059 parent_spec->pool_id = pool_id;
5060 parent_spec->image_id = image_id;
5061 parent_spec->snap_id = snap_id;
5062 rbd_dev->parent_spec = parent_spec;
5063 parent_spec = NULL; /* rbd_dev now owns this */
5069 * We always update the parent overlap. If it's zero we issue
5070 a warning, as we will proceed as if there were no parent.
5074 /* refresh, careful to warn just once */
5075 if (rbd_dev->parent_overlap)
5077 "clone now standalone (overlap became 0)");
5080 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5083 rbd_dev->parent_overlap = overlap;
5089 rbd_spec_put(parent_spec);
5094 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5098 __le64 stripe_count;
5099 } __attribute__ ((packed)) striping_info_buf = { 0 };
5100 size_t size = sizeof (striping_info_buf);
5107 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5108 &rbd_dev->header_oloc, "get_stripe_unit_count",
5109 NULL, 0, &striping_info_buf, size);
5110 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5117 * We don't actually support the "fancy striping" feature
5118 * (STRIPINGV2) yet, but if the striping sizes are the
5119 * defaults the behavior is the same as before. So find
5120 * out, and only fail if the image has non-default values.
5123 obj_size = rbd_obj_bytes(&rbd_dev->header);
5124 p = &striping_info_buf;
5125 stripe_unit = ceph_decode_64(&p);
5126 if (stripe_unit != obj_size) {
5127 rbd_warn(rbd_dev, "unsupported stripe unit "
5128 "(got %llu want %llu)",
5129 stripe_unit, obj_size);
5132 stripe_count = ceph_decode_64(&p);
5133 if (stripe_count != 1) {
5134 rbd_warn(rbd_dev, "unsupported stripe count "
5135 "(got %llu want 1)", stripe_count);
5138 rbd_dev->header.stripe_unit = stripe_unit;
5139 rbd_dev->header.stripe_count = stripe_count;
5144 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5146 __le64 data_pool_id;
5149 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5150 &rbd_dev->header_oloc, "get_data_pool",
5151 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5154 if (ret < sizeof(data_pool_id))
5157 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5158 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5162 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5164 CEPH_DEFINE_OID_ONSTACK(oid);
5165 size_t image_id_size;
5170 void *reply_buf = NULL;
5172 char *image_name = NULL;
5175 rbd_assert(!rbd_dev->spec->image_name);
5177 len = strlen(rbd_dev->spec->image_id);
5178 image_id_size = sizeof (__le32) + len;
5179 image_id = kmalloc(image_id_size, GFP_KERNEL);
5184 end = image_id + image_id_size;
5185 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5187 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5188 reply_buf = kmalloc(size, GFP_KERNEL);
5192 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5193 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5194 "dir_get_name", image_id, image_id_size,
5199 end = reply_buf + ret;
5201 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5202 if (IS_ERR(image_name))
5205 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5213 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5215 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5216 const char *snap_name;
5219 /* Skip over names until we find the one we are looking for */
5221 snap_name = rbd_dev->header.snap_names;
5222 while (which < snapc->num_snaps) {
5223 if (!strcmp(name, snap_name))
5224 return snapc->snaps[which];
5225 snap_name += strlen(snap_name) + 1;
5231 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5233 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5238 for (which = 0; !found && which < snapc->num_snaps; which++) {
5239 const char *snap_name;
5241 snap_id = snapc->snaps[which];
5242 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5243 if (IS_ERR(snap_name)) {
5244 /* ignore no-longer existing snapshots */
5245 if (PTR_ERR(snap_name) == -ENOENT)
5250 found = !strcmp(name, snap_name);
5253 return found ? snap_id : CEPH_NOSNAP;
5257 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5258 * no snapshot by that name is found, or if an error occurs.
5260 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5262 if (rbd_dev->image_format == 1)
5263 return rbd_v1_snap_id_by_name(rbd_dev, name);
5265 return rbd_v2_snap_id_by_name(rbd_dev, name);
5269 * An image being mapped will have everything but the snap id.
5271 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5273 struct rbd_spec *spec = rbd_dev->spec;
5275 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5276 rbd_assert(spec->image_id && spec->image_name);
5277 rbd_assert(spec->snap_name);
5279 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5282 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5283 if (snap_id == CEPH_NOSNAP)
5286 spec->snap_id = snap_id;
5288 spec->snap_id = CEPH_NOSNAP;
5295 * A parent image will have all ids but none of the names.
5297 * All names in an rbd spec are dynamically allocated. It's OK if we
5298 * can't figure out the name for an image id.
5300 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5302 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5303 struct rbd_spec *spec = rbd_dev->spec;
5304 const char *pool_name;
5305 const char *image_name;
5306 const char *snap_name;
5309 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5310 rbd_assert(spec->image_id);
5311 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5313 /* Get the pool name; we have to make our own copy of this */
5315 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5317 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5320 pool_name = kstrdup(pool_name, GFP_KERNEL);
5324 /* Fetch the image name; tolerate failure here */
5326 image_name = rbd_dev_image_name(rbd_dev);
5328 rbd_warn(rbd_dev, "unable to get image name");
5330 /* Fetch the snapshot name */
5332 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5333 if (IS_ERR(snap_name)) {
5334 ret = PTR_ERR(snap_name);
5338 spec->pool_name = pool_name;
5339 spec->image_name = image_name;
5340 spec->snap_name = snap_name;
5350 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5359 struct ceph_snap_context *snapc;
5363 * We'll need room for the seq value (maximum snapshot id),
5364 * snapshot count, and array of that many snapshot ids.
5365 * For now we have a fixed upper limit on the number we're
5366 * prepared to receive.
5368 size = sizeof (__le64) + sizeof (__le32) +
5369 RBD_MAX_SNAP_COUNT * sizeof (__le64);
5370 reply_buf = kzalloc(size, GFP_KERNEL);
5374 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5375 &rbd_dev->header_oloc, "get_snapcontext",
5376 NULL, 0, reply_buf, size);
5377 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5382 end = reply_buf + ret;
5384 ceph_decode_64_safe(&p, end, seq, out);
5385 ceph_decode_32_safe(&p, end, snap_count, out);
5388 * Make sure the reported number of snapshot ids wouldn't go
5389 * beyond the end of our buffer. But before checking that,
5390 * make sure the computed size of the snapshot context we
5391 * allocate is representable in a size_t.
5393 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5398 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5402 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
5408 for (i = 0; i < snap_count; i++)
5409 snapc->snaps[i] = ceph_decode_64(&p);
5411 ceph_put_snap_context(rbd_dev->header.snapc);
5412 rbd_dev->header.snapc = snapc;
5414 dout(" snap context seq = %llu, snap_count = %u\n",
5415 (unsigned long long)seq, (unsigned int)snap_count);
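Wire layout of the "get_snapcontext" reply decoded above:

	__le64 seq;			/* highest snapshot id */
	__le32 snap_count;		/* number of snapshot ids */
	__le64 snaps[snap_count];	/* the ids themselves */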
5422 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5433 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5434 reply_buf = kmalloc(size, GFP_KERNEL);
5436 return ERR_PTR(-ENOMEM);
5438 snapid = cpu_to_le64(snap_id);
5439 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5440 &rbd_dev->header_oloc, "get_snapshot_name",
5441 &snapid, sizeof(snapid), reply_buf, size);
5442 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5444 snap_name = ERR_PTR(ret);
5449 end = reply_buf + ret;
5450 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5451 if (IS_ERR(snap_name))
5454 dout(" snap_id 0x%016llx snap_name = %s\n",
5455 (unsigned long long)snap_id, snap_name);
5462 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5464 bool first_time = rbd_dev->header.object_prefix == NULL;
5467 ret = rbd_dev_v2_image_size(rbd_dev);
5472 ret = rbd_dev_v2_header_onetime(rbd_dev);
5477 ret = rbd_dev_v2_snap_context(rbd_dev);
5478 if (ret && first_time) {
5479 kfree(rbd_dev->header.object_prefix);
5480 rbd_dev->header.object_prefix = NULL;
5486 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5488 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5490 if (rbd_dev->image_format == 1)
5491 return rbd_dev_v1_header_info(rbd_dev);
5493 return rbd_dev_v2_header_info(rbd_dev);
5497 * Skips over white space at *buf, and updates *buf to point to the
5498 * first found non-space character (if any). Returns the length of
5499 * the token (string of non-white space characters) found. Note
5500 * that *buf must be terminated with '\0'.
5502 static inline size_t next_token(const char **buf)
5505 * These are the characters that produce nonzero for
5506 * isspace() in the "C" and "POSIX" locales.
5508 const char *spaces = " \f\n\r\t\v";
5510 *buf += strspn(*buf, spaces); /* Find start of token */
5512 return strcspn(*buf, spaces); /* Return token length */
5516 * Finds the next token in *buf, dynamically allocates a buffer big
5517 * enough to hold a copy of it, and copies the token into the new
5518 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5519 * that a duplicate buffer is created even for a zero-length token.
5521 * Returns a pointer to the newly-allocated duplicate, or a null
5522 * pointer if memory for the duplicate was not available. If
5523 * the lenp argument is a non-null pointer, the length of the token
5524 * (not including the '\0') is returned in *lenp.
5526 * If successful, the *buf pointer will be updated to point beyond
5527 * the end of the found token.
5529 * Note: uses GFP_KERNEL for allocation.
5531 static inline char *dup_token(const char **buf, size_t *lenp)
5536 len = next_token(buf);
5537 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5540 *(dup + len) = '\0';
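A minimal sketch (not built, hence #if 0) of how the two helpers
above consume a buffer; the literal string is made up.

#if 0
static void token_example(void)
{
	const char *buf = "1.2.3.4:6789 name=admin rbd foo";
	char *mons = dup_token(&buf, NULL);	/* "1.2.3.4:6789" */
	char *opts = dup_token(&buf, NULL);	/* "name=admin" */

	kfree(mons);
	kfree(opts);
}
#endif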
5550 * Parse the options provided for an "rbd add" (i.e., rbd image
5551 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5552 * and the data written is passed here via a NUL-terminated buffer.
5553 * Returns 0 if successful or an error code otherwise.
5555 * The information extracted from these options is recorded in
5556 the other parameters, which return dynamically-allocated structures:
5559 * The address of a pointer that will refer to a ceph options
5560 * structure. Caller must release the returned pointer using
5561 * ceph_destroy_options() when it is no longer needed.
5563 * Address of an rbd options pointer. Fully initialized by
5564 * this function; caller must release with kfree().
5566 * Address of an rbd image specification pointer. Fully
5567 * initialized by this function based on parsed options.
5568 * Caller must release with rbd_spec_put().
5570 * The options passed take this form:
5571 <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
5574 * A comma-separated list of one or more monitor addresses.
5575 * A monitor address is an ip address, optionally followed
5576 * by a port number (separated by a colon).
5577 * I.e.: ip1[:port1][,ip2[:port2]...]
5579 * A comma-separated list of ceph and/or rbd options.
5581 * The name of the rados pool containing the rbd image.
5583 * The name of the image in that pool to map.
5585 An optional snapshot name. If provided, the mapping will
5586 present data from the image at the time that snapshot was
5587 created. The image head is used if no snapshot name is
5588 provided. Snapshot mappings are always read-only.
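An illustrative add request (monitor address, user and names are all
made up):

  $ echo "1.2.3.4:6789 name=admin rbd foo snap1" > /sys/bus/rbd/add

maps snapshot "snap1" of image "foo" from pool "rbd"; dropping the
trailing "snap1" maps the image head instead.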
5590 static int rbd_add_parse_args(const char *buf,
5591 struct ceph_options **ceph_opts,
5592 struct rbd_options **opts,
5593 struct rbd_spec **rbd_spec)
5597 const char *mon_addrs;
5599 size_t mon_addrs_size;
5600 struct rbd_spec *spec = NULL;
5601 struct rbd_options *rbd_opts = NULL;
5602 struct ceph_options *copts;
5605 /* The first four tokens are required */
5607 len = next_token(&buf);
5609 rbd_warn(NULL, "no monitor address(es) provided");
5613 mon_addrs_size = len + 1;
5617 options = dup_token(&buf, NULL);
5621 rbd_warn(NULL, "no options provided");
5625 spec = rbd_spec_alloc();
5629 spec->pool_name = dup_token(&buf, NULL);
5630 if (!spec->pool_name)
5632 if (!*spec->pool_name) {
5633 rbd_warn(NULL, "no pool name provided");
5637 spec->image_name = dup_token(&buf, NULL);
5638 if (!spec->image_name)
5640 if (!*spec->image_name) {
5641 rbd_warn(NULL, "no image name provided");
5646 * Snapshot name is optional; default is to use "-"
5647 * (indicating the head/no snapshot).
5649 len = next_token(&buf);
5651 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5652 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5653 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5654 ret = -ENAMETOOLONG;
5657 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5660 *(snap_name + len) = '\0';
5661 spec->snap_name = snap_name;
5663 /* Initialize all rbd options to the defaults */
5665 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
5669 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5670 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5671 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5672 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5674 copts = ceph_parse_options(options, mon_addrs,
5675 mon_addrs + mon_addrs_size - 1,
5676 parse_rbd_opts_token, rbd_opts);
5677 if (IS_ERR(copts)) {
5678 ret = PTR_ERR(copts);
5699 * Return pool id (>= 0) or a negative error code.
5701 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5703 struct ceph_options *opts = rbdc->client->options;
5709 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5710 if (ret == -ENOENT && tries++ < 1) {
5711 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5716 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5717 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
5718 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5720 opts->mount_timeout);
5723 /* the osdmap we have is new enough */
5731 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5733 down_write(&rbd_dev->lock_rwsem);
5734 if (__rbd_is_lock_owner(rbd_dev))
5735 rbd_unlock(rbd_dev);
5736 up_write(&rbd_dev->lock_rwsem);
5739 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5741 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5742 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5746 /* FIXME: "rbd map --exclusive" should be in interruptible */
5747 down_read(&rbd_dev->lock_rwsem);
5748 rbd_wait_state_locked(rbd_dev);
5749 up_read(&rbd_dev->lock_rwsem);
5750 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
5751 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5759 * An rbd format 2 image has a unique identifier, distinct from the
5760 * name given to it by the user. Internally, that identifier is
5761 * what's used to specify the names of objects related to the image.
5763 * A special "rbd id" object is used to map an rbd image name to its
5764 * id. If that object doesn't exist, then there is no v2 rbd image
5765 * with the supplied name.
5767 * This function will record the given rbd_dev's image_id field if
5768 * it can be determined, and in that case will return 0. If any
5769 * errors occur a negative errno will be returned and the rbd_dev's
5770 * image_id field will be unchanged (and should be NULL).
5772 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5776 CEPH_DEFINE_OID_ONSTACK(oid);
5781 * When probing a parent image, the image id is already
5782 * known (and the image name likely is not). There's no
5783 * need to fetch the image id again in this case. We
5784 * do still need to set the image format though.
5786 if (rbd_dev->spec->image_id) {
5787 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5793 * First, see if the format 2 image id file exists, and if
5794 * so, get the image's persistent id from it.
5796 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5797 rbd_dev->spec->image_name);
5801 dout("rbd id object name is %s\n", oid.name);
5803 /* Response will be an encoded string, which includes a length */
5805 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5806 response = kzalloc(size, GFP_NOIO);
5812 /* If it doesn't exist we'll assume it's a format 1 image */
5814 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5816 response, RBD_IMAGE_ID_LEN_MAX);
5817 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5818 if (ret == -ENOENT) {
5819 image_id = kstrdup("", GFP_KERNEL);
5820 ret = image_id ? 0 : -ENOMEM;
5822 rbd_dev->image_format = 1;
5823 } else if (ret >= 0) {
5826 image_id = ceph_extract_encoded_string(&p, p + ret,
5828 ret = PTR_ERR_OR_ZERO(image_id);
5830 rbd_dev->image_format = 2;
5834 rbd_dev->spec->image_id = image_id;
5835 dout("image_id is %s\n", image_id);
5839 ceph_oid_destroy(&oid);
5844 Undo whatever state changes are made by v1 or v2 header info routines.
5847 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5849 struct rbd_image_header *header;
5851 rbd_dev_parent_put(rbd_dev);
5853 /* Free dynamic fields from the header, then zero it out */
5855 header = &rbd_dev->header;
5856 ceph_put_snap_context(header->snapc);
5857 kfree(header->snap_sizes);
5858 kfree(header->snap_names);
5859 kfree(header->object_prefix);
5860 memset(header, 0, sizeof (*header));
5863 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5867 ret = rbd_dev_v2_object_prefix(rbd_dev);
5872 Get and check the features for the image. Currently the
5873 * features are assumed to never change.
5875 ret = rbd_dev_v2_features(rbd_dev);
5879 /* If the image supports fancy striping, get its parameters */
5881 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5882 ret = rbd_dev_v2_striping_info(rbd_dev);
5887 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5888 ret = rbd_dev_v2_data_pool(rbd_dev);
5893 rbd_init_layout(rbd_dev);
5897 rbd_dev->header.features = 0;
5898 kfree(rbd_dev->header.object_prefix);
5899 rbd_dev->header.object_prefix = NULL;
5904 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5905 * rbd_dev_image_probe() recursion depth, which means it's also the
5906 * length of the already discovered part of the parent chain.
5908 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5910 struct rbd_device *parent = NULL;
5913 if (!rbd_dev->parent_spec)
5916 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5917 pr_info("parent chain is too long (%d)\n", depth);
5922 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5929 * Images related by parent/child relationships always share
5930 * rbd_client and spec/parent_spec, so bump their refcounts.
5932 __rbd_get_client(rbd_dev->rbd_client);
5933 rbd_spec_get(rbd_dev->parent_spec);
5935 ret = rbd_dev_image_probe(parent, depth);
5939 rbd_dev->parent = parent;
5940 atomic_set(&rbd_dev->parent_ref, 1);
5944 rbd_dev_unparent(rbd_dev);
5945 rbd_dev_destroy(parent);
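For illustration, probing a mapped clone of a clone builds the chain

	rbd_dev (depth 0) -> parent (depth 1) -> grandparent (depth 2)

recursing once per ancestor and giving up if depth would exceed
RBD_MAX_PARENT_CHAIN_LEN (16).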
5949 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5951 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5952 rbd_dev_mapping_clear(rbd_dev);
5953 rbd_free_disk(rbd_dev);
5955 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5959 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5962 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5966 /* Record our major and minor device numbers. */
5968 if (!single_major) {
5969 ret = register_blkdev(0, rbd_dev->name);
5971 goto err_out_unlock;
5973 rbd_dev->major = ret;
5976 rbd_dev->major = rbd_major;
5977 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5980 /* Set up the blkdev mapping. */
5982 ret = rbd_init_disk(rbd_dev);
5984 goto err_out_blkdev;
5986 ret = rbd_dev_mapping_set(rbd_dev);
5990 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5991 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5993 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5995 goto err_out_mapping;
5997 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5998 up_write(&rbd_dev->header_rwsem);
6002 rbd_dev_mapping_clear(rbd_dev);
6004 rbd_free_disk(rbd_dev);
6007 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6009 up_write(&rbd_dev->header_rwsem);
6013 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6015 struct rbd_spec *spec = rbd_dev->spec;
6018 /* Record the header object name for this rbd image. */
6020 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6021 if (rbd_dev->image_format == 1)
6022 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6023 spec->image_name, RBD_SUFFIX);
6025 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6026 RBD_HEADER_PREFIX, spec->image_id);
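Illustrative header object names for an image named "foo", assuming
the usual rbd_types.h values (RBD_SUFFIX ".rbd", RBD_HEADER_PREFIX
"rbd_header."):

	format 1:	"foo.rbd"
	format 2:	"rbd_header.<image_id>"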
6031 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6033 rbd_dev_unprobe(rbd_dev);
6035 rbd_unregister_watch(rbd_dev);
6036 rbd_dev->image_format = 0;
6037 kfree(rbd_dev->spec->image_id);
6038 rbd_dev->spec->image_id = NULL;
6042 * Probe for the existence of the header object for the given rbd
6043 * device. If this image is the one being mapped (i.e., not a
6044 * parent), initiate a watch on its header object before using that
6045 * object to get detailed information about the rbd image.
6047 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6052 * Get the id from the image id object. Unless there's an
6053 * error, rbd_dev->spec->image_id will be filled in with
6054 * a dynamically-allocated string, and rbd_dev->image_format
6055 * will be set to either 1 or 2.
6057 ret = rbd_dev_image_id(rbd_dev);
6061 ret = rbd_dev_header_name(rbd_dev);
6063 goto err_out_format;
6066 ret = rbd_register_watch(rbd_dev);
6069 pr_info("image %s/%s does not exist\n",
6070 rbd_dev->spec->pool_name,
6071 rbd_dev->spec->image_name);
6072 goto err_out_format;
6076 ret = rbd_dev_header_info(rbd_dev);
6081 * If this image is the one being mapped, we have pool name and
6082 * id, image name and id, and snap name - need to fill snap id.
6083 * Otherwise this is a parent image, identified by pool, image
6084 * and snap ids - need to fill in names for those ids.
6087 ret = rbd_spec_fill_snap_id(rbd_dev);
6089 ret = rbd_spec_fill_names(rbd_dev);
6092 pr_info("snap %s/%s@%s does not exist\n",
6093 rbd_dev->spec->pool_name,
6094 rbd_dev->spec->image_name,
6095 rbd_dev->spec->snap_name);
6099 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6100 ret = rbd_dev_v2_parent_info(rbd_dev);
6105 * Need to warn users if this image is the one being
6106 * mapped and has a parent.
6108 if (!depth && rbd_dev->parent_spec)
6110 "WARNING: kernel layering is EXPERIMENTAL!");
6113 ret = rbd_dev_probe_parent(rbd_dev, depth);
6117 dout("discovered format %u image, header name is %s\n",
6118 rbd_dev->image_format, rbd_dev->header_oid.name);
6122 rbd_dev_unprobe(rbd_dev);
6125 rbd_unregister_watch(rbd_dev);
6127 rbd_dev->image_format = 0;
6128 kfree(rbd_dev->spec->image_id);
6129 rbd_dev->spec->image_id = NULL;
6133 static ssize_t do_rbd_add(struct bus_type *bus,
6137 struct rbd_device *rbd_dev = NULL;
6138 struct ceph_options *ceph_opts = NULL;
6139 struct rbd_options *rbd_opts = NULL;
6140 struct rbd_spec *spec = NULL;
6141 struct rbd_client *rbdc;
6145 if (!try_module_get(THIS_MODULE))
6148 /* parse add command */
6149 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
6153 rbdc = rbd_get_client(ceph_opts);
6160 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
6163 pr_info("pool %s does not exist\n", spec->pool_name);
6164 goto err_out_client;
6166 spec->pool_id = (u64)rc;
6168 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
6171 goto err_out_client;
6173 rbdc = NULL; /* rbd_dev now owns this */
6174 spec = NULL; /* rbd_dev now owns this */
6175 rbd_opts = NULL; /* rbd_dev now owns this */
6177 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
6178 if (!rbd_dev->config_info) {
6180 goto err_out_rbd_dev;
6183 down_write(&rbd_dev->header_rwsem);
6184 rc = rbd_dev_image_probe(rbd_dev, 0);
6186 up_write(&rbd_dev->header_rwsem);
6187 goto err_out_rbd_dev;
6190 /* If we are mapping a snapshot it must be marked read-only */
6192 read_only = rbd_dev->opts->read_only;
6193 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6195 rbd_dev->mapping.read_only = read_only;
6197 rc = rbd_dev_device_setup(rbd_dev);
6199 goto err_out_image_probe;
6201 if (rbd_dev->opts->exclusive) {
6202 rc = rbd_add_acquire_lock(rbd_dev);
6204 goto err_out_device_setup;
6207 /* Everything's ready. Announce the disk to the world. */
6209 rc = device_add(&rbd_dev->dev);
6211 goto err_out_image_lock;
6213 add_disk(rbd_dev->disk);
6214 /* see rbd_init_disk() */
6215 blk_put_queue(rbd_dev->disk->queue);
6217 spin_lock(&rbd_dev_list_lock);
6218 list_add_tail(&rbd_dev->node, &rbd_dev_list);
6219 spin_unlock(&rbd_dev_list_lock);
6221 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
6222 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
6223 rbd_dev->header.features);
6226 module_put(THIS_MODULE);
6230 rbd_dev_image_unlock(rbd_dev);
6231 err_out_device_setup:
6232 rbd_dev_device_release(rbd_dev);
6233 err_out_image_probe:
6234 rbd_dev_image_release(rbd_dev);
6236 rbd_dev_destroy(rbd_dev);
6238 rbd_put_client(rbdc);
6245 static ssize_t rbd_add(struct bus_type *bus,
6252 return do_rbd_add(bus, buf, count);
6255 static ssize_t rbd_add_single_major(struct bus_type *bus,
6259 return do_rbd_add(bus, buf, count);
6262 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6264 while (rbd_dev->parent) {
6265 struct rbd_device *first = rbd_dev;
6266 struct rbd_device *second = first->parent;
6267 struct rbd_device *third;
6270 Follow to the parent with no grandparent and remove it.
6273 while (second && (third = second->parent)) {
6278 rbd_dev_image_release(second);
6279 rbd_dev_destroy(second);
6280 first->parent = NULL;
6281 first->parent_overlap = 0;
6283 rbd_assert(first->parent_spec);
6284 rbd_spec_put(first->parent_spec);
6285 first->parent_spec = NULL;
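In other words, teardown runs deepest ancestor first: each pass of
the outer loop walks to the end of the chain, releases that image,
and detaches it from its child, until only the mapped image remains.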
6289 static ssize_t do_rbd_remove(struct bus_type *bus,
6293 struct rbd_device *rbd_dev = NULL;
6294 struct list_head *tmp;
6297 bool already = false;
6303 sscanf(buf, "%d %5s", &dev_id, opt_buf);
6305 pr_err("dev_id out of range\n");
6308 if (opt_buf[0] != '\0') {
6309 if (!strcmp(opt_buf, "force")) {
6312 pr_err("bad remove option at '%s'\n", opt_buf);
6318 spin_lock(&rbd_dev_list_lock);
6319 list_for_each(tmp, &rbd_dev_list) {
6320 rbd_dev = list_entry(tmp, struct rbd_device, node);
6321 if (rbd_dev->dev_id == dev_id) {
6327 spin_lock_irq(&rbd_dev->lock);
6328 if (rbd_dev->open_count && !force)
6331 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6333 spin_unlock_irq(&rbd_dev->lock);
6335 spin_unlock(&rbd_dev_list_lock);
6336 if (ret < 0 || already)
6341 * Prevent new IO from being queued and wait for existing
6342 * IO to complete/fail.
6344 blk_mq_freeze_queue(rbd_dev->disk->queue);
6345 blk_set_queue_dying(rbd_dev->disk->queue);
6348 del_gendisk(rbd_dev->disk);
6349 spin_lock(&rbd_dev_list_lock);
6350 list_del_init(&rbd_dev->node);
6351 spin_unlock(&rbd_dev_list_lock);
6352 device_del(&rbd_dev->dev);
6354 rbd_dev_image_unlock(rbd_dev);
6355 rbd_dev_device_release(rbd_dev);
6356 rbd_dev_image_release(rbd_dev);
6357 rbd_dev_destroy(rbd_dev);
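Illustrative removal via sysfs (device id 0 assumed):

  $ echo 0 > /sys/bus/rbd/remove
  $ echo "0 force" > /sys/bus/rbd/remove

where the second form corresponds to the "force" option parsed above
and proceeds even while the device is open.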
6361 static ssize_t rbd_remove(struct bus_type *bus,
6368 return do_rbd_remove(bus, buf, count);
6371 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6375 return do_rbd_remove(bus, buf, count);
6379 * create control files in sysfs
6382 static int rbd_sysfs_init(void)
6386 ret = device_register(&rbd_root_dev);
6390 ret = bus_register(&rbd_bus_type);
6392 device_unregister(&rbd_root_dev);
6397 static void rbd_sysfs_cleanup(void)
6399 bus_unregister(&rbd_bus_type);
6400 device_unregister(&rbd_root_dev);
6403 static int rbd_slab_init(void)
6405 rbd_assert(!rbd_img_request_cache);
6406 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6407 if (!rbd_img_request_cache)
6410 rbd_assert(!rbd_obj_request_cache);
6411 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6412 if (!rbd_obj_request_cache)
6418 kmem_cache_destroy(rbd_img_request_cache);
6419 rbd_img_request_cache = NULL;
6423 static void rbd_slab_exit(void)
6425 rbd_assert(rbd_obj_request_cache);
6426 kmem_cache_destroy(rbd_obj_request_cache);
6427 rbd_obj_request_cache = NULL;
6429 rbd_assert(rbd_img_request_cache);
6430 kmem_cache_destroy(rbd_img_request_cache);
6431 rbd_img_request_cache = NULL;
6434 static int __init rbd_init(void)
6438 if (!libceph_compatible(NULL)) {
6439 rbd_warn(NULL, "libceph incompatibility (quitting)");
6443 rc = rbd_slab_init();
6448 * The number of active work items is limited by the number of
6449 * rbd devices * queue depth, so leave @max_active at default.
6451 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6458 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6459 if (rbd_major < 0) {
6465 rc = rbd_sysfs_init();
6467 goto err_out_blkdev;
6470 pr_info("loaded (major %d)\n", rbd_major);
6472 pr_info("loaded\n");
6478 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6480 destroy_workqueue(rbd_wq);
6486 static void __exit rbd_exit(void)
6488 ida_destroy(&rbd_dev_id_ida);
6489 rbd_sysfs_cleanup();
6491 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6492 destroy_workqueue(rbd_wq);
6496 module_init(rbd_init);
6497 module_exit(rbd_exit);
6499 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6500 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6501 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6502 /* following authorship retained from original osdblk.c */
6503 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6505 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6506 MODULE_LICENSE("GPL");