extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
-
/* Meta data layout
 * We reserve a 128MB Block (4k aligned)
 * either at the end of the backing device
extern rwlock_t global_state_lock;
extern int conn_lowest_minor(struct drbd_tconn *tconn);
-extern struct drbd_conf *drbd_new_device(unsigned int minor);
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
extern void drbd_free_mdev(struct drbd_conf *mdev);
+extern void drbd_delete_device(unsigned int minor);
struct drbd_tconn *drbd_new_tconn(char *name);
extern void drbd_free_tconn(struct drbd_tconn *tconn);
return thi ? thi->name : task->comm;
}
-#ifdef CONFIG_SMP
int conn_lowest_minor(struct drbd_tconn *tconn)
{
int minor = 0;
- idr_get_next(&tconn->volumes, &minor);
+
+ /* idr_get_next() returns NULL when no volume with id >= 0 exists;
+  * report that as -1 so callers can tell "connection has no volumes"
+  * apart from a valid lowest minor of 0. */
+ if (!idr_get_next(&tconn->volumes, &minor))
+ return -1;
return minor;
}
+
+#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
* @mdev: DRBD device.
dev_err(DEV, "%d EEs in net list found!\n", rr);
}
-/* caution. no locking.
- * currently only used from module cleanup code. */
-static void drbd_delete_device(unsigned int minor)
+/* caution. no locking. */
+void drbd_delete_device(unsigned int minor)
{
struct drbd_conf *mdev = minor_to_mdev(minor);
if (!mdev)
return;
+ idr_remove(&mdev->tconn->volumes, minor);
+
/* paranoia asserts */
D_ASSERT(mdev->open_cnt == 0);
D_ASSERT(list_empty(&mdev->tconn->data.work.q));
bdput(mdev->this_bdev);
drbd_free_resources(mdev);
- drbd_free_tconn(mdev->tconn);
drbd_release_ee_lists(mdev);
if (!tconn->name)
goto fail;
+ if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ goto fail;
+
if (!tl_init(tconn))
goto fail;
fail:
tl_cleanup(tconn);
+ free_cpumask_var(tconn->cpu_mask);
kfree(tconn->name);
kfree(tconn);
write_unlock_irq(&global_state_lock);
idr_destroy(&tconn->volumes);
+ free_cpumask_var(tconn->cpu_mask);
kfree(tconn->name);
kfree(tconn->int_dig_out);
kfree(tconn->int_dig_in);
kfree(tconn);
}
-struct drbd_conf *drbd_new_device(unsigned int minor)
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
struct gendisk *disk;
struct request_queue *q;
- char conn_name[9]; /* drbd1234N */
- int vnr;
+ int vnr_got = vnr;
+
+ mdev = minor_to_mdev(minor);
+ if (mdev)
+ return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
if (!mdev)
- return NULL;
- sprintf(conn_name, "drbd%d", minor);
- mdev->tconn = drbd_new_tconn(conn_name);
- if (!mdev->tconn)
- goto out_no_tconn;
- if (!idr_pre_get(&mdev->tconn->volumes, GFP_KERNEL))
- goto out_no_cpumask;
- if (idr_get_new(&mdev->tconn->volumes, mdev, &vnr))
- goto out_no_cpumask;
- if (vnr != 0) {
- dev_err(DEV, "vnr = %d\n", vnr);
- goto out_no_cpumask;
- }
- if (!zalloc_cpumask_var(&mdev->tconn->cpu_mask, GFP_KERNEL))
- goto out_no_cpumask;
+ return ERR_NOMEM;
+
+ mdev->tconn = tconn;
+ if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
+ goto out_no_idr;
+ if (idr_get_new(&tconn->volumes, mdev, &vnr_got))
+ goto out_no_idr;
+ if (vnr_got != vnr) {
+ dev_err(DEV, "vnr_got (%d) != vnr (%d)\n", vnr_got, vnr);
+ goto out_no_q;
+ }
mdev->minor = minor;
INIT_LIST_HEAD(&mdev->current_epoch->list);
mdev->epochs = 1;
- return mdev;
+ minor_table[minor] = mdev;
+ add_disk(disk);
+
+ return NO_ERROR;
/* out_whatever_else:
kfree(mdev->current_epoch); */
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- free_cpumask_var(mdev->tconn->cpu_mask);
-out_no_cpumask:
- drbd_free_tconn(mdev->tconn);
-out_no_tconn:
+ idr_remove(&tconn->volumes, vnr_got);
+out_no_idr:
kfree(mdev);
- return NULL;
+ return ERR_NOMEM;
}
/* counterpart of drbd_new_device.
return rv;
}
-static struct drbd_conf *ensure_mdev(int minor, int create)
-{
- struct drbd_conf *mdev;
-
- if (minor >= minor_count)
- return NULL;
-
- mdev = minor_to_mdev(minor);
-
- if (!mdev && create) {
- struct gendisk *disk = NULL;
- mdev = drbd_new_device(minor);
-
- spin_lock_irq(&drbd_pp_lock);
- if (minor_table[minor] == NULL) {
- minor_table[minor] = mdev;
- disk = mdev->vdisk;
- mdev = NULL;
- } /* else: we lost the race */
- spin_unlock_irq(&drbd_pp_lock);
-
- if (disk) /* we won the race above */
- /* in case we ever add a drbd_delete_device(),
- * don't forget the del_gendisk! */
- add_disk(disk);
- else /* we lost the race above */
- drbd_free_mdev(mdev);
-
- mdev = minor_to_mdev(minor);
- }
-
- return mdev;
-}
-
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
sc.al_extents = DRBD_AL_EXTENTS_MAX;
- /* to avoid spurious errors when configuring minors before configuring
- * the minors they depend on: if necessary, first create the minor we
- * depend on */
- if (sc.after >= 0)
- ensure_mdev(sc.after, 1);
-
/* most sanity checks done, try to assign the new sync-after
* dependency. need to hold the global lock in there,
* to avoid a race in the dependency loop check. */
return 0;
}
+/* Netlink handler for P_new_connection (CHT_CTOR): create a named,
+ * volume-less connection object.  The mandatory "name" tag is parsed
+ * from the request; failure to allocate the tconn is reported as
+ * ERR_NOMEM in the reply.  Always returns 0 (reply carries the status). */
+static int drbd_nl_new_conn(struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply)
+{
+ struct new_connection args;
+
+ if (!new_connection_from_tags(nlp->tag_list, &args)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ return 0;
+ }
+
+ reply->ret_code = NO_ERROR;
+ if (!drbd_new_tconn(args.name))
+ reply->ret_code = ERR_NOMEM;
+
+ return 0;
+}
+
+/* Netlink handler for P_new_minor (CHT_CONN): attach a new minor/volume
+ * to an existing connection.  Both the "minor" and "vol_nr" tags are
+ * mandatory; args is zero-initialized defensively before tag parsing.
+ * The real work (allocation, idr insertion) is done by conn_new_minor();
+ * its drbd_ret_code is passed straight back in the reply. */
+static int drbd_nl_new_minor(struct drbd_tconn *tconn,
+ struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply)
+{
+ struct new_minor args;
+
+ args.vol_nr = 0;
+ args.minor = 0;
+
+ if (!new_minor_from_tags(nlp->tag_list, &args)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ return 0;
+ }
+
+ reply->ret_code = conn_new_minor(tconn, args.minor, args.vol_nr);
+
+ return 0;
+}
+
+/* Netlink handler for P_del_minor (CHT_MINOR): destroy a minor, but only
+ * when it is fully deconfigured (diskless, standalone, secondary).
+ * Anything else means the device is still in use and deletion is refused
+ * with ERR_MINOR_CONFIGURED.
+ * NOTE(review): drbd_delete_device() itself states "caution. no locking." —
+ * presumably serialization comes from the connector callback; confirm. */
+static int drbd_nl_del_minor(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
+ struct drbd_nl_cfg_reply *reply)
+{
+ if (mdev->state.disk == D_DISKLESS &&
+ mdev->state.conn == C_STANDALONE &&
+ mdev->state.role == R_SECONDARY) {
+ drbd_delete_device(mdev_to_minor(mdev));
+ reply->ret_code = NO_ERROR;
+ } else {
+ reply->ret_code = ERR_MINOR_CONFIGURED;
+ }
+ return 0;
+}
+
+/* Netlink handler for P_del_connection (CHT_CONN): tear down a connection
+ * once it holds no volumes.  conn_lowest_minor() < 0 means the volumes idr
+ * is empty, so the tconn can be freed; otherwise report ERR_CONN_IN_USE
+ * and leave it alone.  Always returns 0 (reply carries the status). */
+static int drbd_nl_del_conn(struct drbd_tconn *tconn,
+ struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply)
+{
+ if (conn_lowest_minor(tconn) < 0) {
+ drbd_free_tconn(tconn);
+ reply->ret_code = NO_ERROR;
+ } else {
+ reply->ret_code = ERR_CONN_IN_USE;
+ }
+
+ return 0;
+}
+
enum cn_handler_type {
CHT_MINOR,
CHT_CONN,
CHT_CTOR,
/* CHT_RES, later */
};
-
struct cn_handler_struct {
enum cn_handler_type type;
union {
sizeof(struct get_timeout_flag_tag_len_struct)},
[ P_start_ov ] = { CHT_MINOR, { &drbd_nl_start_ov }, 0 },
[ P_new_c_uuid ] = { CHT_MINOR, { &drbd_nl_new_c_uuid }, 0 },
+ [ P_new_connection ] = { CHT_CTOR, { .constructor = &drbd_nl_new_conn }, 0 },
+ [ P_new_minor ] = { CHT_CONN, { .conn_based = &drbd_nl_new_minor }, 0 },
+ [ P_del_minor ] = { CHT_MINOR, { &drbd_nl_del_minor }, 0 },
+ [ P_del_connection ] = { CHT_CONN, { .conn_based = &drbd_nl_del_conn }, 0 },
};
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
ERR_PIC_AFTER_DEP = 156,
ERR_PIC_PEER_DEP = 157,
ERR_CONN_NOT_KNOWN = 158,
+ ERR_CONN_IN_USE = 159,
+ ERR_MINOR_CONFIGURED = 160,
+ ERR_MINOR_EXISTS = 161,
/* insert new ones above this line */
AFTER_LAST_ERR_CODE
NL_RESPONSE(return_code_only, 27)
#endif
+NL_PACKET(new_connection, 28, /* CHT_CTOR */
+ NL_STRING( 85, T_MANDATORY, name, DRBD_NL_OBJ_NAME_LEN)
+)
+
+NL_PACKET(new_minor, 29, /* CHT_CONN */
+ NL_INTEGER( 86, T_MANDATORY, minor)
+ NL_INTEGER( 87, T_MANDATORY, vol_nr)
+)
+
+NL_PACKET(del_minor, 30, ) /* CHT_MINOR */
+NL_PACKET(del_connection, 31, ) /* CHT_CONN */
+
#undef NL_PACKET
#undef NL_INTEGER
#undef NL_INT64