]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/block/drbd/drbd_nl.c
drbd: Basic refcounting for drbd_tconn
[karo-tx-linux.git] / drivers / block / drbd / drbd_nl.c
index 515bcd948a43d7ee04650a06044e7bea6bc4742c..23c34baa75a4f764bebb7c39e613df3c63bb780d 100644 (file)
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/slab.h>
-#include <linux/connector.h>
 #include <linux/blkpg.h>
 #include <linux/cpumask.h>
 #include "drbd_int.h"
 #include "drbd_req.h"
 #include "drbd_wrappers.h"
 #include <asm/unaligned.h>
-#include <linux/drbd_tag_magic.h>
 #include <linux/drbd_limits.h>
-#include <linux/compiler.h>
 #include <linux/kthread.h>
 
-static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
-static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
-static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
-
-/* see get_sb_bdev and bd_claim */
+#include <net/genetlink.h>
+
+/* .doit */
+// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
+// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
+/* .dumpit */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+
+#include <linux/drbd_genl_api.h>
+#include <linux/genl_magic_func.h>
+
+/* used by blkdev_get_by_path to claim our meta data device(s) */
 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
 
-/* Generate the tag_list to struct functions */
-#define NL_PACKET(name, number, fields) \
-static int name ## _from_tags(struct drbd_conf *mdev, \
-       unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
-static int name ## _from_tags(struct drbd_conf *mdev, \
-       unsigned short *tags, struct name *arg) \
-{ \
-       int tag; \
-       int dlen; \
-       \
-       while ((tag = get_unaligned(tags++)) != TT_END) {       \
-               dlen = get_unaligned(tags++);                   \
-               switch (tag_number(tag)) { \
-               fields \
-               default: \
-                       if (tag & T_MANDATORY) { \
-                               dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
-                               return 0; \
-                       } \
-               } \
-               tags = (unsigned short *)((char *)tags + dlen); \
-       } \
-       return 1; \
-}
-#define NL_INTEGER(pn, pr, member) \
-       case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
-               arg->member = get_unaligned((int *)(tags));     \
-               break;
-#define NL_INT64(pn, pr, member) \
-       case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
-               arg->member = get_unaligned((u64 *)(tags));     \
-               break;
-#define NL_BIT(pn, pr, member) \
-       case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
-               arg->member = *(char *)(tags) ? 1 : 0; \
-               break;
-#define NL_STRING(pn, pr, member, len) \
-       case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
-               if (dlen > len) { \
-                       dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
-                               #member, dlen, (unsigned int)len); \
-                       return 0; \
-               } \
-                arg->member ## _len = dlen; \
-                memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
-                break;
-#include "linux/drbd_nl.h"
-
-/* Generate the struct to tag_list functions */
-#define NL_PACKET(name, number, fields) \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
-       struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
-       struct name *arg, unsigned short *tags) \
-{ \
-       fields \
-       return tags; \
-}
-
-#define NL_INTEGER(pn, pr, member) \
-       put_unaligned(pn | pr | TT_INTEGER, tags++);    \
-       put_unaligned(sizeof(int), tags++);             \
-       put_unaligned(arg->member, (int *)tags);        \
-       tags = (unsigned short *)((char *)tags+sizeof(int));
-#define NL_INT64(pn, pr, member) \
-       put_unaligned(pn | pr | TT_INT64, tags++);      \
-       put_unaligned(sizeof(u64), tags++);             \
-       put_unaligned(arg->member, (u64 *)tags);        \
-       tags = (unsigned short *)((char *)tags+sizeof(u64));
-#define NL_BIT(pn, pr, member) \
-       put_unaligned(pn | pr | TT_BIT, tags++);        \
-       put_unaligned(sizeof(char), tags++);            \
-       *(char *)tags = arg->member; \
-       tags = (unsigned short *)((char *)tags+sizeof(char));
-#define NL_STRING(pn, pr, member, len) \
-       put_unaligned(pn | pr | TT_STRING, tags++);     \
-       put_unaligned(arg->member ## _len, tags++);     \
-       memcpy(tags, arg->member, arg->member ## _len); \
-       tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include "linux/drbd_nl.h"
-
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
-void drbd_nl_send_reply(struct cn_msg *, int);
+/* Configuration is strictly serialized, because generic netlink message
+ * processing is strictly serialized by the genl_lock().
+ * Which means we can use one static global drbd_config_context struct.
+ */
+static struct drbd_config_context {
+       /* assigned from drbd_genlmsghdr */
+       unsigned int minor;
+       /* assigned from request attributes, if present */
+       unsigned int volume;
+#define VOLUME_UNSPECIFIED             (-1U)
+       /* pointer into the request skb,
+        * limited lifetime! */
+       char *conn_name;
+
+       /* reply buffer */
+       struct sk_buff *reply_skb;
+       /* pointer into reply buffer */
+       struct drbd_genlmsghdr *reply_dh;
+       /* resolved from attributes, if possible */
+       struct drbd_conf *mdev;
+       struct drbd_tconn *tconn;
+} adm_ctx;
 
-int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
 {
-       char *envp[] = { "HOME=/",
-                       "TERM=linux",
-                       "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-                       NULL, /* Will be set to address family */
-                       NULL, /* Will be set to address */
-                       NULL };
+       genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
+       if (genlmsg_reply(skb, info))
+               printk(KERN_ERR "drbd: error sending genl reply\n");
+}
 
-       char mb[12], af[20], ad[60], *afs;
-       char *argv[] = {usermode_helper, cmd, mb, NULL };
-       int ret;
+/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
+ * reason it could fail would be no space in the skb, and there are 4k available. */
+int drbd_msg_put_info(const char *info)
+{
+       struct sk_buff *skb = adm_ctx.reply_skb;
+       struct nlattr *nla;
+       int err = -EMSGSIZE;
 
-       snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
+       if (!info || !info[0])
+               return 0;
+
+       nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+       if (!nla)
+               return err;
+
+       err = nla_put_string(skb, T_info_text, info);
+       if (err) {
+               nla_nest_cancel(skb, nla);
+               return err;
+       } else
+               nla_nest_end(skb, nla);
+       return 0;
+}
+
+/* This would be a good candidate for a "pre_doit" hook,
+ * and per-family private info->pointers.
+ * But we need to stay compatible with older kernels.
+ * If it returns successfully, adm_ctx members are valid.
+ */
+#define DRBD_ADM_NEED_MINOR    1
+#define DRBD_ADM_NEED_CONN     2
+static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
+               unsigned flags)
+{
+       struct drbd_genlmsghdr *d_in = info->userhdr;
+       const u8 cmd = info->genlhdr->cmd;
+       int err;
+
+       memset(&adm_ctx, 0, sizeof(adm_ctx));
+
+       /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
+       if (cmd != DRBD_ADM_GET_STATUS
+       && security_netlink_recv(skb, CAP_SYS_ADMIN))
+              return -EPERM;
+
+       adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!adm_ctx.reply_skb)
+               goto fail;
+
+       adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+                                       info, &drbd_genl_family, 0, cmd);
+       /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
+        * but check anyway */
+       if (!adm_ctx.reply_dh)
+               goto fail;
+
+       adm_ctx.reply_dh->minor = d_in->minor;
+       adm_ctx.reply_dh->ret_code = NO_ERROR;
+
+       if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
+               struct nlattr *nla;
+               /* parse and validate only */
+               err = drbd_cfg_context_from_attrs(NULL, info);
+               if (err)
+                       goto fail;
+
+               /* It was present, and valid,
+                * copy it over to the reply skb. */
+               err = nla_put_nohdr(adm_ctx.reply_skb,
+                               info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
+                               info->attrs[DRBD_NLA_CFG_CONTEXT]);
+               if (err)
+                       goto fail;
+
+               /* and assign stuff to the global adm_ctx */
+               nla = nested_attr_tb[__nla_type(T_ctx_volume)];
+               adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
+               nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
+               if (nla)
+                       adm_ctx.conn_name = nla_data(nla);
+       } else
+               adm_ctx.volume = VOLUME_UNSPECIFIED;
+
+       adm_ctx.minor = d_in->minor;
+       adm_ctx.mdev = minor_to_mdev(d_in->minor);
+       adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
+
+       if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+               drbd_msg_put_info("unknown minor");
+               return ERR_MINOR_INVALID;
+       }
+       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
+               drbd_msg_put_info("unknown connection");
+               return ERR_INVALID_REQUEST;
+       }
+
+       /* some more paranoia, if the request was over-determined */
+       if (adm_ctx.mdev && adm_ctx.tconn &&
+           adm_ctx.mdev->tconn != adm_ctx.tconn) {
+               pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
+                               adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
+               drbd_msg_put_info("minor exists in different connection");
+               return ERR_INVALID_REQUEST;
+       }
+       if (adm_ctx.mdev &&
+           adm_ctx.volume != VOLUME_UNSPECIFIED &&
+           adm_ctx.volume != adm_ctx.mdev->vnr) {
+               pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+                               adm_ctx.minor, adm_ctx.volume,
+                               adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+               drbd_msg_put_info("minor exists as different volume");
+               return ERR_INVALID_REQUEST;
+       }
+       if (adm_ctx.mdev && !adm_ctx.tconn)
+               adm_ctx.tconn = adm_ctx.mdev->tconn;
+       return NO_ERROR;
+
+fail:
+       nlmsg_free(adm_ctx.reply_skb);
+       adm_ctx.reply_skb = NULL;
+       return -ENOMEM;
+}
+
+static int drbd_adm_finish(struct genl_info *info, int retcode)
+{
+       struct nlattr *nla;
+       const char *conn_name = NULL;
+
+       if (!adm_ctx.reply_skb)
+               return -ENOMEM;
+
+       adm_ctx.reply_dh->ret_code = retcode;
+
+       nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
+       if (nla) {
+               nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
+               if (nla)
+                       conn_name = nla_data(nla);
+       }
+
+       drbd_adm_send_reply(adm_ctx.reply_skb, info);
+       return 0;
+}
+
+static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+{
+       char *afs;
+       struct net_conf *nc;
 
-       if (get_net_conf(mdev)) {
-               switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
+       rcu_read_lock();
+       nc = rcu_dereference(tconn->net_conf);
+       if (nc) {
+               switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
                case AF_INET6:
                        afs = "ipv6";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
-                                &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
+                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+                                &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
                        break;
                case AF_INET:
                        afs = "ipv4";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
+                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                                &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
                        break;
                default:
                        afs = "ssocks";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
+                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                                &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
                }
-               snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
-               envp[3]=af;
-               envp[4]=ad;
-               put_net_conf(mdev);
+               snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
        }
+       rcu_read_unlock();
+}
+
+int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+{
+       char *envp[] = { "HOME=/",
+                       "TERM=linux",
+                       "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+                        (char[20]) { }, /* address family */
+                        (char[60]) { }, /* address */
+                       NULL };
+       char mb[12];
+       char *argv[] = {usermode_helper, cmd, mb, NULL };
+       struct sib_info sib;
+       int ret;
+
+       snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
+       setup_khelper_env(mdev->tconn, envp);
 
        /* The helper may take some time.
         * write out any unsynced meta data changes now */
        drbd_md_sync(mdev);
 
        dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
-
-       drbd_bcast_ev_helper(mdev, cmd);
+       sib.sib_reason = SIB_HELPER_PRE;
+       sib.helper_name = cmd;
+       drbd_bcast_event(mdev, &sib);
        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -188,6 +316,54 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
                dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
+       sib.sib_reason = SIB_HELPER_POST;
+       sib.helper_exit_code = ret;
+       drbd_bcast_event(mdev, &sib);
+
+       if (ret < 0) /* Ignore any ERRNOs we got. */
+               ret = 0;
+
+       return ret;
+}
+
+static void conn_md_sync(struct drbd_tconn *tconn)
+{
+       struct drbd_conf *mdev;
+       int vnr;
+
+       down_read(&drbd_cfg_rwsem);
+       idr_for_each_entry(&tconn->volumes, mdev, vnr)
+               drbd_md_sync(mdev);
+       up_read(&drbd_cfg_rwsem);
+}
+
+int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+{
+       char *envp[] = { "HOME=/",
+                       "TERM=linux",
+                       "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+                        (char[20]) { }, /* address family */
+                        (char[60]) { }, /* address */
+                       NULL };
+       char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+       int ret;
+
+       setup_khelper_env(tconn, envp);
+       conn_md_sync(tconn);
+
+       conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+       /* TODO: conn_bcast_event() ?? */
+
+       ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+       if (ret)
+               conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, tconn->name,
+                         (ret >> 8) & 0xff, ret);
+       else
+               conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, tconn->name,
+                         (ret >> 8) & 0xff, ret);
+       /* TODO: conn_bcast_event() ?? */
 
        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;
@@ -195,116 +371,128 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        return ret;
 }
 
-enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
+{
+       enum drbd_fencing_p fp = FP_NOT_AVAIL;
+       struct drbd_conf *mdev;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (get_ldev_if_state(mdev, D_CONSISTENT)) {
+                       fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
+                       put_ldev(mdev);
+               }
+       }
+       rcu_read_unlock();
+
+       return fp;
+}
+
+bool conn_try_outdate_peer(struct drbd_tconn *tconn)
 {
+       union drbd_state mask = { };
+       union drbd_state val = { };
+       enum drbd_fencing_p fp;
        char *ex_to_string;
        int r;
-       enum drbd_disk_state nps;
-       enum drbd_fencing_p fp;
 
-       D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+       if (tconn->cstate >= C_WF_REPORT_PARAMS) {
+               conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+               return false;
+       }
 
-       if (get_ldev_if_state(mdev, D_CONSISTENT)) {
-               fp = mdev->ldev->dc.fencing;
-               put_ldev(mdev);
-       } else {
-               dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
-               nps = mdev->state.pdsk;
+       fp = highest_fencing_policy(tconn);
+       switch (fp) {
+       case FP_NOT_AVAIL:
+               conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
                goto out;
+       case FP_DONT_CARE:
+               return true;
+       default: ;
        }
 
-       r = drbd_khelper(mdev, "fence-peer");
+       r = conn_khelper(tconn, "fence-peer");
 
        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
-               nps = D_INCONSISTENT;
+               mask.pdsk = D_MASK;
+               val.pdsk = D_INCONSISTENT;
                break;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
-               nps = D_OUTDATED;
+               mask.pdsk = D_MASK;
+               val.pdsk = D_OUTDATED;
                break;
        case 5: /* peer was down */
-               if (mdev->state.disk == D_UP_TO_DATE) {
+               if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
-                       nps = D_OUTDATED;
+                       mask.pdsk = D_MASK;
+                       val.pdsk = D_OUTDATED;
                } else {
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
-                       nps = mdev->state.pdsk;
                }
                break;
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
-               dev_warn(DEV, "Peer is primary, outdating myself.\n");
-               nps = D_UNKNOWN;
-               _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
+               conn_warn(tconn, "Peer is primary, outdating myself.\n");
+               mask.disk = D_MASK;
+               val.disk = D_OUTDATED;
                break;
        case 7:
                if (fp != FP_STONITH)
-                       dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
+                       conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
-               nps = D_OUTDATED;
+               mask.pdsk = D_MASK;
+               val.pdsk = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
-               nps = D_UNKNOWN;
-               dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
-               return nps;
+               conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+               return false; /* Eventually leave IO frozen */
        }
 
-       dev_info(DEV, "fence-peer helper returned %d (%s)\n",
-                       (r>>8) & 0xff, ex_to_string);
+       conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+                 (r>>8) & 0xff, ex_to_string);
 
-out:
-       if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
-               /* The handler was not successful... unfreeze here, the
-                  state engine can not unfreeze... */
-               _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
-       }
+ out:
 
-       return nps;
+       /* Not using
+          conn_request_state(tconn, mask, val, CS_VERBOSE);
+          here, because we might have been able to re-establish the connection in the
+          meantime. */
+       spin_lock_irq(&tconn->req_lock);
+       if (tconn->cstate < C_WF_REPORT_PARAMS)
+               _conn_request_state(tconn, mask, val, CS_VERBOSE);
+       spin_unlock_irq(&tconn->req_lock);
+
+       return conn_highest_pdsk(tconn) <= D_OUTDATED;
 }
 
 static int _try_outdate_peer_async(void *data)
 {
-       struct drbd_conf *mdev = (struct drbd_conf *)data;
-       enum drbd_disk_state nps;
-       union drbd_state ns;
-
-       nps = drbd_try_outdate_peer(mdev);
+       struct drbd_tconn *tconn = (struct drbd_tconn *)data;
 
-       /* Not using
-          drbd_request_state(mdev, NS(pdsk, nps));
-          here, because we might were able to re-establish the connection
-          in the meantime. This can only partially be solved in the state's
-          engine is_valid_state() and is_valid_state_transition()
-          functions.
-
-          nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
-          pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
-          therefore we have to have the pre state change check here.
-       */
-       spin_lock_irq(&mdev->req_lock);
-       ns = mdev->state;
-       if (ns.conn < C_WF_REPORT_PARAMS) {
-               ns.pdsk = nps;
-               _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
-       }
-       spin_unlock_irq(&mdev->req_lock);
+       conn_try_outdate_peer(tconn);
 
+       kref_put(&tconn->kref, &conn_destroy);
        return 0;
 }
 
-void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
+void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
 {
        struct task_struct *opa;
 
-       opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
-       if (IS_ERR(opa))
-               dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
+       kref_get(&tconn->kref);
+       opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+       if (IS_ERR(opa)) {
+               conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
+               kref_put(&tconn->kref, &conn_destroy);
+       }
 }
 
 enum drbd_state_rv
@@ -312,15 +500,15 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 {
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
+       struct net_conf *nc;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;
-       enum drbd_disk_state nps;
 
        if (new_role == R_PRIMARY)
-               request_ping(mdev); /* Detect a dead peer ASAP */
+               request_ping(mdev->tconn); /* Detect a dead peer ASAP */
 
-       mutex_lock(&mdev->state_mutex);
+       mutex_lock(mdev->state_mutex);
 
        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;
@@ -348,38 +536,34 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
-                       nps = drbd_try_outdate_peer(mdev);
 
-                       if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
+                       if (conn_try_outdate_peer(mdev->tconn)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
-
-                       val.pdsk = nps;
-                       mask.pdsk = D_MASK;
-
                        continue;
                }
 
                if (rv == SS_NOTHING_TO_DO)
-                       goto fail;
+                       goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-                       nps = drbd_try_outdate_peer(mdev);
-
-                       if (force && nps > D_OUTDATED) {
+                       if (!conn_try_outdate_peer(mdev->tconn) && force) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
-                               nps = D_OUTDATED;
-                       }
-
-                       mask.pdsk = D_MASK;
-                       val.pdsk  = nps;
+                               mask.pdsk = D_MASK;
+                               val.pdsk  = D_OUTDATED;
 
+                       }
                        continue;
                }
                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
-                       schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
+                       int timeo;
+                       rcu_read_lock();
+                       nc = rcu_dereference(mdev->tconn->net_conf);
+                       timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+                       rcu_read_unlock();
+                       schedule_timeout_interruptible(timeo);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
@@ -388,13 +572,13 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        rv = _drbd_request_state(mdev, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (rv < SS_SUCCESS)
-                               goto fail;
+                               goto out;
                }
                break;
        }
 
        if (rv < SS_SUCCESS)
-               goto fail;
+               goto out;
 
        if (forced)
                dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
@@ -409,10 +593,12 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        put_ldev(mdev);
                }
        } else {
-               if (get_net_conf(mdev)) {
-                       mdev->net_conf->want_lose = 0;
-                       put_net_conf(mdev);
-               }
+               mutex_lock(&mdev->tconn->net_conf_update);
+               nc = mdev->tconn->net_conf;
+               if (nc)
+                       nc->want_lose = 0; /* without copy; single bit op is atomic */
+               mutex_unlock(&mdev->tconn->net_conf_update);
+
                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
@@ -438,67 +624,47 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        drbd_md_sync(mdev);
 
        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- fail:
-       mutex_unlock(&mdev->state_mutex);
+out:
+       mutex_unlock(mdev->state_mutex);
        return rv;
 }
 
-static struct drbd_conf *ensure_mdev(int minor, int create)
+static const char *from_attrs_err_to_txt(int err)
 {
-       struct drbd_conf *mdev;
-
-       if (minor >= minor_count)
-               return NULL;
-
-       mdev = minor_to_mdev(minor);
-
-       if (!mdev && create) {
-               struct gendisk *disk = NULL;
-               mdev = drbd_new_device(minor);
-
-               spin_lock_irq(&drbd_pp_lock);
-               if (minor_table[minor] == NULL) {
-                       minor_table[minor] = mdev;
-                       disk = mdev->vdisk;
-                       mdev = NULL;
-               } /* else: we lost the race */
-               spin_unlock_irq(&drbd_pp_lock);
-
-               if (disk) /* we won the race above */
-                       /* in case we ever add a drbd_delete_device(),
-                        * don't forget the del_gendisk! */
-                       add_disk(disk);
-               else /* we lost the race above */
-                       drbd_free_mdev(mdev);
-
-               mdev = minor_to_mdev(minor);
-       }
-
-       return mdev;
+       return  err == -ENOMSG ? "required attribute missing" :
+               err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+               err == -EEXIST ? "can not change invariant setting" :
+               "invalid attribute value";
 }
 
-static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                          struct drbd_nl_cfg_reply *reply)
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
 {
-       struct primary primary_args;
-
-       memset(&primary_args, 0, sizeof(struct primary));
-       if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
-               reply->ret_code = ERR_MANDATORY_TAG;
-               return 0;
-       }
-
-       reply->ret_code =
-               drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
+       struct set_role_parms parms;
+       int err;
+       enum drbd_ret_code retcode;
 
-       return 0;
-}
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                            struct drbd_nl_cfg_reply *reply)
-{
-       reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
+       memset(&parms, 0, sizeof(parms));
+       if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
+               err = set_role_parms_from_attrs(&parms, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto out;
+               }
+       }
 
+       if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
+               retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+       else
+               retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
@@ -527,7 +693,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
        case DRBD_MD_INDEX_FLEX_INT:
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                /* al size is still fixed */
-               bdev->md.al_offset = -MD_AL_MAX_SIZE;
+               bdev->md.al_offset = -MD_AL_SECTORS;
                /* we need (slightly less than) ~ this much bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -575,10 +741,16 @@ char *ppsize(char *buf, unsigned long long size)
  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
  *  peer may not initiate a resize.
  */
+/* Note these are not to be confused with
+ * drbd_adm_suspend_io/drbd_adm_resume_io,
+ * which are (sub) state changes triggered by admin (drbdsetup),
+ * and can be long lived.
+ * This changes an mdev->flag, is triggered by drbd internals,
+ * and should be short-lived. */
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
        set_bit(SUSPEND_IO, &mdev->flags);
-       if (is_susp(mdev->state))
+       if (drbd_suspended(mdev))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
 }
@@ -744,24 +916,24 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
  * failed, and 0 on success. You should call drbd_md_sync() after you called
  * this function.
  */
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
 {
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;
 
-       ERR_IF(mdev->sync_conf.al_extents < 7)
-               mdev->sync_conf.al_extents = 127;
+       if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
+               dc->al_extents = DRBD_AL_EXTENTS_MIN;
 
        if (mdev->act_log &&
-           mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+           mdev->act_log->nr_elements == dc->al_extents)
                return 0;
 
        in_use = 0;
        t = mdev->act_log;
-       n = lc_create("act_log", drbd_al_ext_cache,
-               mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+       n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
+               dc->al_extents, sizeof(struct lc_element), 0);
 
        if (n == NULL) {
                dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -845,9 +1017,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
-               if (mdev->agreed_pro_version < 94)
+               if (mdev->tconn->agreed_pro_version < 94)
                        peer = mdev->peer_max_bio_size;
-               else if (mdev->agreed_pro_version == 94)
+               else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
                else /* drbd 8.3.8 onwards */
                        peer = DRBD_MAX_BIO_SIZE;
@@ -871,29 +1043,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
  * or start a new one.  Flush any pending work, there may still be an
  * after_state_change queued.
  */
-static void drbd_reconfig_start(struct drbd_conf *mdev)
+static void conn_reconfig_start(struct drbd_tconn *tconn)
 {
-       wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
-       wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
-       drbd_thread_start(&mdev->worker);
-       drbd_flush_workqueue(mdev);
+       wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
+       wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
+       drbd_thread_start(&tconn->worker);
+       conn_flush_workqueue(tconn);
 }
 
 /* if still unconfigured, stops worker again.
  * if configured now, clears CONFIG_PENDING.
  * wakes potential waiters */
-static void drbd_reconfig_done(struct drbd_conf *mdev)
+static void conn_reconfig_done(struct drbd_tconn *tconn)
 {
-       spin_lock_irq(&mdev->req_lock);
-       if (mdev->state.disk == D_DISKLESS &&
-           mdev->state.conn == C_STANDALONE &&
-           mdev->state.role == R_SECONDARY) {
-               set_bit(DEVICE_DYING, &mdev->flags);
-               drbd_thread_stop_nowait(&mdev->worker);
+       spin_lock_irq(&tconn->req_lock);
+       if (conn_all_vols_unconf(tconn)) {
+               set_bit(OBJECT_DYING, &tconn->flags);
+               drbd_thread_stop_nowait(&tconn->worker);
        } else
-               clear_bit(CONFIG_PENDING, &mdev->flags);
-       spin_unlock_irq(&mdev->req_lock);
-       wake_up(&mdev->state_wait);
+               clear_bit(CONFIG_PENDING, &tconn->flags);
+       spin_unlock_irq(&tconn->req_lock);
+       wake_up(&tconn->ping_wait);
 }
 
 /* Make sure IO is suspended before calling this function(). */
@@ -901,29 +1071,134 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 {
        int s = 0;
 
-       if (lc_try_lock(mdev->act_log)) {
-               drbd_al_shrink(mdev);
-               lc_unlock(mdev->act_log);
-       } else {
+       if (!lc_try_lock(mdev->act_log)) {
                dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }
 
-       spin_lock_irq(&mdev->req_lock);
+       drbd_al_shrink(mdev);
+       spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
+       lc_unlock(mdev->act_log);
 
        if (s)
                dev_info(DEV, "Suspended AL updates\n");
 }
 
-/* does always return 0;
- * interesting return code is in reply->ret_code */
-static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                            struct drbd_nl_cfg_reply *reply)
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct drbd_conf *mdev;
+       struct disk_conf *ndc; /* new disk conf */
+       int err, fifo_size;
+       int *rs_plan_s = NULL;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       mdev = adm_ctx.mdev;
+
+       /* we also need a disk
+        * to change the options on */
+       if (!get_ldev(mdev)) {
+               retcode = ERR_NO_DISK;
+               goto out;
+       }
+
+/* FIXME freeze IO, cluster wide.
+ *
+ * We should make sure no-one uses
+ * some half-updated struct when we
+ * assign it later. */
+
+       ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
+       if (!ndc) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+
+       memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
+       err = disk_conf_from_attrs_for_change(ndc, info);
+       if (err) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+       }
+
+       if (!expect(ndc->resync_rate >= 1))
+               ndc->resync_rate = 1;
+
+       /* clip to allowed range */
+       if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
+               ndc->al_extents = DRBD_AL_EXTENTS_MIN;
+       if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
+               ndc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+       /* most sanity checks done, try to assign the new sync-after
+        * dependency.  need to hold the global lock in there,
+        * to avoid a race in the dependency loop check. */
+       retcode = drbd_alter_sa(mdev, ndc->resync_after);
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+       if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
+               rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
+               if (!rs_plan_s) {
+                       dev_err(DEV, "kmalloc of fifo_buffer failed");
+                       retcode = ERR_NOMEM;
+                       goto fail;
+               }
+       }
+
+       if (fifo_size != mdev->rs_plan_s.size) {
+               kfree(mdev->rs_plan_s.values);
+               mdev->rs_plan_s.values = rs_plan_s;
+               mdev->rs_plan_s.size   = fifo_size;
+               mdev->rs_planed = 0;
+               rs_plan_s = NULL;
+       }
+
+       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+       drbd_al_shrink(mdev);
+       err = drbd_check_al_size(mdev, ndc);
+       lc_unlock(mdev->act_log);
+       wake_up(&mdev->al_wait);
+
+       if (err) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+
+       /* FIXME
+        * To avoid someone looking at a half-updated struct, we probably
+        * should have a rw-semaphor on net_conf and disk_conf.
+        */
+       mdev->ldev->dc = *ndc;
+
+       drbd_md_sync(mdev);
+
+
+       if (mdev->state.conn >= C_CONNECTED)
+               drbd_send_sync_param(mdev);
+
+ fail:
+       put_ldev(mdev);
+       kfree(ndc);
+       kfree(rs_plan_s);
+ out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
+       struct drbd_conf *mdev;
+       int err;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        sector_t max_possible_sectors;
@@ -933,10 +1208,17 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        struct lru_cache *resync_lru = NULL;
        union drbd_state ns, os;
        enum drbd_state_rv rv;
+       struct net_conf *nc;
        int cp_discovered = 0;
-       int logical_block_size;
 
-       drbd_reconfig_start(mdev);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto finish;
+
+       mdev = adm_ctx.mdev;
+       conn_reconfig_start(mdev->tconn);
 
        /* if you want to reconfigure, please tear down first */
        if (mdev->state.disk > D_DISKLESS) {
@@ -949,36 +1231,57 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
-       /* allocation not in the IO path, cqueue thread context */
+       /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
                retcode = ERR_NOMEM;
                goto fail;
        }
 
-       nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
-       nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
-       nbc->dc.fencing       = DRBD_FENCING_DEF;
-       nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
-
-       if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
+       nbc->dc = (struct disk_conf) {
+               {}, 0, /* backing_dev */
+               {}, 0, /* meta_dev */
+               0, /* meta_dev_idx */
+               DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
+               DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
+               DRBD_ON_IO_ERROR_DEF, /* on_io_error */
+               DRBD_FENCING_DEF, /* fencing */
+               DRBD_RATE_DEF, /* resync_rate */
+               DRBD_AFTER_DEF, /* resync_after */
+               DRBD_AL_EXTENTS_DEF, /* al_extents */
+               DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
+               DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
+               DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
+               DRBD_C_MAX_RATE_DEF, /* c_max_rate */
+               DRBD_C_MIN_RATE_DEF, /* c_min_rate */
+               0, /* no_disk_barrier */
+               0, /* no_disk_flush */
+               0, /* no_disk_drain */
+               0, /* no_md_flush */
+       };
+
+       err = disk_conf_from_attrs(&nbc->dc, info);
+       if (err) {
                retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+       if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }
 
-       if (get_net_conf(mdev)) {
-               int prot = mdev->net_conf->wire_protocol;
-               put_net_conf(mdev);
-               if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
+       rcu_read_lock();
+       nc = rcu_dereference(mdev->tconn->net_conf);
+       if (nc) {
+               if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+                       rcu_read_unlock();
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
                }
        }
+       rcu_read_unlock();
 
        bdev = blkdev_get_by_path(nbc->dc.backing_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
@@ -1000,7 +1303,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         */
        bdev = blkdev_get_by_path(nbc->dc.meta_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                                 (nbc->dc.meta_dev_idx < 0) ?
+                                 ((int)nbc->dc.meta_dev_idx < 0) ?
                                  (void *)mdev : (void *)drbd_m_holder);
        if (IS_ERR(bdev)) {
                dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
@@ -1018,7 +1321,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        }
 
        resync_lru = lc_create("resync", drbd_bm_ext_cache,
-                       61, sizeof(struct bm_extent),
+                       1, 61, sizeof(struct bm_extent),
                        offsetof(struct bm_extent, lce));
        if (!resync_lru) {
                retcode = ERR_NOMEM;
@@ -1036,7 +1339,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                goto fail;
        }
 
-       if (nbc->dc.meta_dev_idx < 0) {
+       if ((int)nbc->dc.meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);
@@ -1067,14 +1370,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                dev_warn(DEV, "==> truncating very big lower level device "
                        "to currently maximum possible %llu sectors <==\n",
                        (unsigned long long) max_possible_sectors);
-               if (nbc->dc.meta_dev_idx >= 0)
+               if ((int)nbc->dc.meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                      "meta data may help <<==\n");
        }
 
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
+       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);
 
@@ -1089,25 +1392,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
        drbd_md_set_sector_offsets(mdev, nbc);
 
-       /* allocate a second IO page if logical_block_size != 512 */
-       logical_block_size = bdev_logical_block_size(nbc->md_bdev);
-       if (logical_block_size == 0)
-               logical_block_size = MD_SECTOR_SIZE;
-
-       if (logical_block_size != MD_SECTOR_SIZE) {
-               if (!mdev->md_io_tmpp) {
-                       struct page *page = alloc_page(GFP_NOIO);
-                       if (!page)
-                               goto force_diskless_dec;
-
-                       dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
-                            logical_block_size, MD_SECTOR_SIZE);
-                       dev_warn(DEV, "Workaround engaged (has performance impact).\n");
-
-                       mdev->md_io_tmpp = page;
-               }
-       }
-
        if (!mdev->bitmap) {
                if (drbd_bm_init(mdev)) {
                        retcode = ERR_NOMEM;
@@ -1129,7 +1413,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        }
 
        /* Since we are diskless, fix the activity log first... */
-       if (drbd_check_al_size(mdev)) {
+       if (drbd_check_al_size(mdev, &nbc->dc)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }
@@ -1173,7 +1457,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
+           !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
                set_bit(CRASHED_PRIMARY, &mdev->flags);
                cp_discovered = 1;
        }
@@ -1222,7 +1506,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                }
        } else {
                if (drbd_bitmap_io(mdev, &drbd_bm_read,
-                       "read from attaching", BM_LOCKED_MASK) < 0) {
+                       "read from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
@@ -1240,9 +1524,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
                drbd_suspend_al(mdev); /* IO is still suspended here... */
 
-       spin_lock_irq(&mdev->req_lock);
-       os = mdev->state;
-       ns.i = os.i;
+       spin_lock_irq(&mdev->tconn->req_lock);
+       os = drbd_read_state(mdev);
+       ns = os;
        /* If MDF_CONSISTENT is not set go into inconsistent state,
           otherwise investigate MDF_WasUpToDate...
           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
@@ -1284,8 +1568,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        }
 
        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
-       ns = mdev->state;
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (rv < SS_SUCCESS)
                goto force_diskless_dec;
@@ -1300,8 +1583,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        put_ldev(mdev);
-       reply->ret_code = retcode;
-       drbd_reconfig_done(mdev);
+       conn_reconfig_done(mdev->tconn);
+       drbd_adm_finish(info, retcode);
        return 0;
 
  force_diskless_dec:
@@ -1310,6 +1593,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        drbd_force_state(mdev, NS(disk, D_FAILED));
        drbd_md_sync(mdev);
  fail:
+       conn_reconfig_done(mdev->tconn);
        if (nbc) {
                if (nbc->backing_bdev)
                        blkdev_put(nbc->backing_bdev,
@@ -1321,140 +1605,378 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        }
        lc_destroy(resync_lru);
 
-       reply->ret_code = retcode;
-       drbd_reconfig_done(mdev);
+ finish:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
+static int adm_detach(struct drbd_conf *mdev)
+{
+       enum drbd_state_rv retcode;
+       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+       retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+       wait_event(mdev->misc_wait,
+                       mdev->state.disk != D_DISKLESS ||
+                       !atomic_read(&mdev->local_cnt));
+       drbd_resume_io(mdev);
+       return retcode;
+}
+
 /* Detaching the disk is a process in multiple stages.  First we need to lock
  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
  * internal references as well.
  * Only then we have finally detached. */
-static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                         struct drbd_nl_cfg_reply *reply)
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
-       int ret;
-       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
-       retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
-       /* D_FAILED will transition to DISKLESS. */
-       ret = wait_event_interruptible(mdev->misc_wait,
-                       mdev->state.disk != D_FAILED);
-       drbd_resume_io(mdev);
-       if ((int)retcode == (int)SS_IS_DISKLESS)
-               retcode = SS_NOTHING_TO_DO;
-       if (ret)
-               retcode = ERR_INTR;
-       reply->ret_code = retcode;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       retcode = adm_detach(adm_ctx.mdev);
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                           struct drbd_nl_cfg_reply *reply)
+static bool conn_resync_running(struct drbd_tconn *tconn)
 {
-       int i, ns;
-       enum drbd_ret_code retcode;
-       struct net_conf *new_conf = NULL;
-       struct crypto_hash *tfm = NULL;
-       struct crypto_hash *integrity_w_tfm = NULL;
-       struct crypto_hash *integrity_r_tfm = NULL;
-       struct hlist_head *new_tl_hash = NULL;
-       struct hlist_head *new_ee_hash = NULL;
-       struct drbd_conf *odev;
-       char hmac_name[CRYPTO_MAX_ALG_NAME];
-       void *int_dig_out = NULL;
-       void *int_dig_in = NULL;
-       void *int_dig_vv = NULL;
-       struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+       struct drbd_conf *mdev;
+       bool rv = false;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (mdev->state.conn == C_SYNC_SOURCE ||
+                   mdev->state.conn == C_SYNC_TARGET ||
+                   mdev->state.conn == C_PAUSED_SYNC_S ||
+                   mdev->state.conn == C_PAUSED_SYNC_T) {
+                       rv = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
 
-       drbd_reconfig_start(mdev);
+       return rv;
+}
 
-       if (mdev->state.conn > C_STANDALONE) {
-               retcode = ERR_NET_CONFIGURED;
-               goto fail;
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+       struct drbd_conf *mdev;
+       bool rv = false;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (mdev->state.conn == C_VERIFY_S ||
+                   mdev->state.conn == C_VERIFY_T) {
+                       rv = true;
+                       break;
+               }
        }
+       rcu_read_unlock();
+
+       return rv;
+}
+
+static enum drbd_ret_code
+_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+{
+       struct drbd_conf *mdev;
+       int i;
+
+       if (old_conf && tconn->agreed_pro_version < 100 &&
+           tconn->cstate == C_WF_REPORT_PARAMS &&
+           new_conf->wire_protocol != old_conf->wire_protocol)
+               return ERR_NEED_APV_100;
+
+       if (new_conf->two_primaries &&
+           (new_conf->wire_protocol != DRBD_PROT_C))
+               return ERR_NOT_PROTO_C;
+
+       idr_for_each_entry(&tconn->volumes, mdev, i) {
+               if (get_ldev(mdev)) {
+                       enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
+                       put_ldev(mdev);
+                       if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+                               return ERR_STONITH_AND_PROT_A;
+               }
+               if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
+                       return ERR_DISCARD;
+       }
+
+       if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+               return ERR_CONG_NOT_PROTO_A;
+
+       return NO_ERROR;
+}
+
+static enum drbd_ret_code
+check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+{
+       static enum drbd_ret_code rv;
+       struct drbd_conf *mdev;
+       int i;
+
+       rcu_read_lock();
+       rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+       rcu_read_unlock();
+
+       /* tconn->volumes protected by genl_lock() here */
+       idr_for_each_entry(&tconn->volumes, mdev, i) {
+               if (!mdev->bitmap) {
+                       if(drbd_bm_init(mdev))
+                               return ERR_NOMEM;
+               }
+       }
+
+       return rv;
+}
+
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct drbd_tconn *tconn;
+       struct net_conf *old_conf, *new_conf = NULL;
+       int err;
+       int ovr; /* online verify running */
+       int rsr; /* re-sync running */
+       struct crypto_hash *verify_tfm = NULL;
+       struct crypto_hash *csums_tfm = NULL;
+
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       tconn = adm_ctx.tconn;
 
-       /* allocation not in the IO path, cqueue thread context */
        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
+               goto out;
+       }
+
+       conn_reconfig_start(tconn);
+
+       mutex_lock(&tconn->net_conf_update);
+       old_conf = tconn->net_conf;
+
+       if (!old_conf) {
+               drbd_msg_put_info("net conf missing, try connect");
+               retcode = ERR_INVALID_REQUEST;
                goto fail;
        }
 
-       new_conf->timeout          = DRBD_TIMEOUT_DEF;
-       new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
-       new_conf->ping_int         = DRBD_PING_INT_DEF;
-       new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
-       new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
-       new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
-       new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
-       new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
-       new_conf->ko_count         = DRBD_KO_COUNT_DEF;
-       new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
-       new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
-       new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
-       new_conf->want_lose        = 0;
-       new_conf->two_primaries    = 0;
-       new_conf->wire_protocol    = DRBD_PROT_C;
-       new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
-       new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
-       new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
-       new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
-
-       if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
+       *new_conf = *old_conf;
+
+       err = net_conf_from_attrs_for_change(new_conf, info);
+       if (err) {
                retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       if (new_conf->two_primaries
-           && (new_conf->wire_protocol != DRBD_PROT_C)) {
-               retcode = ERR_NOT_PROTO_C;
+       retcode = check_net_options(tconn, new_conf);
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       /* re-sync running */
+       rsr = conn_resync_running(tconn);
+       if (rsr && old_conf && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+               retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }
 
-       if (get_ldev(mdev)) {
-               enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
-               put_ldev(mdev);
-               if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
-                       retcode = ERR_STONITH_AND_PROT_A;
+       if (!rsr && new_conf->csums_alg[0]) {
+               csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
+               if (IS_ERR(csums_tfm)) {
+                       csums_tfm = NULL;
+                       retcode = ERR_CSUMS_ALG;
+                       goto fail;
+               }
+
+               if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
+                       retcode = ERR_CSUMS_ALG_ND;
                        goto fail;
                }
        }
 
-       if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
-               retcode = ERR_CONG_NOT_PROTO_A;
+       /* online verify running */
+       ovr = conn_ov_running(tconn);
+       if (ovr) {
+               if (strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+                       retcode = ERR_VERIFY_RUNNING;
+                       goto fail;
+               }
+       }
+
+       if (!ovr && new_conf->verify_alg[0]) {
+               verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
+               if (IS_ERR(verify_tfm)) {
+                       verify_tfm = NULL;
+                       retcode = ERR_VERIFY_ALG;
+                       goto fail;
+               }
+
+               if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
+                       retcode = ERR_VERIFY_ALG_ND;
+                       goto fail;
+               }
+       }
+
+       rcu_assign_pointer(tconn->net_conf, new_conf);
+
+       if (!rsr) {
+               crypto_free_hash(tconn->csums_tfm);
+               tconn->csums_tfm = csums_tfm;
+               csums_tfm = NULL;
+       }
+       if (!ovr) {
+               crypto_free_hash(tconn->verify_tfm);
+               tconn->verify_tfm = verify_tfm;
+               verify_tfm = NULL;
+       }
+
+       mutex_unlock(&tconn->net_conf_update);
+       synchronize_rcu();
+       kfree(old_conf);
+
+       if (tconn->cstate >= C_WF_REPORT_PARAMS)
+               drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+
+       goto done;
+
+ fail:
+       mutex_unlock(&tconn->net_conf_update);
+       crypto_free_hash(csums_tfm);
+       crypto_free_hash(verify_tfm);
+       kfree(new_conf);
+ done:
+       conn_reconfig_done(tconn);
+ out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+{
+       char hmac_name[CRYPTO_MAX_ALG_NAME];
+       struct drbd_conf *mdev;
+       struct net_conf *old_conf, *new_conf = NULL;
+       struct crypto_hash *tfm = NULL;
+       struct crypto_hash *integrity_w_tfm = NULL;
+       struct crypto_hash *integrity_r_tfm = NULL;
+       void *int_dig_in = NULL;
+       void *int_dig_vv = NULL;
+       struct drbd_tconn *oconn;
+       struct drbd_tconn *tconn;
+       struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+       enum drbd_ret_code retcode;
+       int i;
+       int err;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       tconn = adm_ctx.tconn;
+       conn_reconfig_start(tconn);
+
+       if (tconn->cstate > C_STANDALONE) {
+               retcode = ERR_NET_CONFIGURED;
+               goto fail;
+       }
+
+       /* allocation not in the IO path, cqueue thread context */
+       new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+       if (!new_conf) {
+               retcode = ERR_NOMEM;
                goto fail;
        }
 
-       if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
-               retcode = ERR_DISCARD;
+       *new_conf = (struct net_conf) {
+               {}, 0, /* my_addr */
+               {}, 0, /* peer_addr */
+               {}, 0, /* shared_secret */
+               {}, 0, /* cram_hmac_alg */
+               {}, 0, /* integrity_alg */
+               {}, 0, /* verify_alg */
+               {}, 0, /* csums_alg */
+               DRBD_PROTOCOL_DEF, /* wire_protocol */
+               DRBD_CONNECT_INT_DEF, /* try_connect_int */
+               DRBD_TIMEOUT_DEF, /* timeout */
+               DRBD_PING_INT_DEF, /* ping_int */
+               DRBD_PING_TIMEO_DEF, /* ping_timeo */
+               DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
+               DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
+               DRBD_KO_COUNT_DEF, /* ko_count */
+               DRBD_MAX_BUFFERS_DEF, /* max_buffers */
+               DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
+               DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
+               DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
+               DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
+               DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
+               DRBD_RR_CONFLICT_DEF, /* rr_conflict */
+               DRBD_ON_CONGESTION_DEF, /* on_congestion */
+               DRBD_CONG_FILL_DEF, /* cong_fill */
+               DRBD_CONG_EXTENTS_DEF, /* cong_extents */
+               0, /* two_primaries */
+               0, /* want_lose */
+               0, /* no_cork */
+               0, /* always_asbp */
+               0, /* dry_run */
+               0, /* use_rle */
+       };
+
+       err = net_conf_from_attrs(new_conf, info);
+       if (err) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
+       retcode = check_net_options(tconn, new_conf);
+       if (retcode != NO_ERROR)
+               goto fail;
+
        retcode = NO_ERROR;
 
        new_my_addr = (struct sockaddr *)&new_conf->my_addr;
        new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
-       for (i = 0; i < minor_count; i++) {
-               odev = minor_to_mdev(i);
-               if (!odev || odev == mdev)
+
+       /* No need to take drbd_cfg_rwsem here.  All reconfiguration is
+        * strictly serialized on genl_lock(). We are protected against
+        * concurrent reconfiguration/addition/deletion */
+       list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
+               struct net_conf *nc;
+               if (oconn == tconn)
                        continue;
-               if (get_net_conf(odev)) {
-                       taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
-                       if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
+
+               rcu_read_lock();
+               nc = rcu_dereference(oconn->net_conf);
+               if (nc) {
+                       taken_addr = (struct sockaddr *)&nc->my_addr;
+                       if (new_conf->my_addr_len == nc->my_addr_len &&
                            !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
                                retcode = ERR_LOCAL_ADDR;
 
-                       taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
-                       if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
+                       taken_addr = (struct sockaddr *)&nc->peer_addr;
+                       if (new_conf->peer_addr_len == nc->peer_addr_len &&
                            !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
                                retcode = ERR_PEER_ADDR;
-
-                       put_net_conf(odev);
-                       if (retcode != NO_ERROR)
-                               goto fail;
                }
+               rcu_read_unlock();
+               if (retcode != NO_ERROR)
+                       goto fail;
        }
 
        if (new_conf->cram_hmac_alg[0] != 0) {
@@ -1494,33 +2016,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                }
        }
 
-       ns = new_conf->max_epoch_size/8;
-       if (mdev->tl_hash_s != ns) {
-               new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
-               if (!new_tl_hash) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-       }
-
-       ns = new_conf->max_buffers/8;
-       if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
-               new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
-               if (!new_ee_hash) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-       }
-
        ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
 
+       /* allocation not in the IO path, cqueue thread context */
        if (integrity_w_tfm) {
                i = crypto_hash_digestsize(integrity_w_tfm);
-               int_dig_out = kmalloc(i, GFP_KERNEL);
-               if (!int_dig_out) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
                int_dig_in = kmalloc(i, GFP_KERNEL);
                if (!int_dig_in) {
                        retcode = ERR_NOMEM;
@@ -1533,133 +2033,131 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                }
        }
 
-       if (!mdev->bitmap) {
-               if(drbd_bm_init(mdev)) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-       }
+       conn_flush_workqueue(tconn);
 
-       drbd_flush_workqueue(mdev);
-       spin_lock_irq(&mdev->req_lock);
-       if (mdev->net_conf != NULL) {
+       mutex_lock(&tconn->net_conf_update);
+       old_conf = tconn->net_conf;
+       if (old_conf) {
                retcode = ERR_NET_CONFIGURED;
-               spin_unlock_irq(&mdev->req_lock);
+               mutex_unlock(&tconn->net_conf_update);
                goto fail;
        }
-       mdev->net_conf = new_conf;
+       rcu_assign_pointer(tconn->net_conf, new_conf);
 
-       mdev->send_cnt = 0;
-       mdev->recv_cnt = 0;
-
-       if (new_tl_hash) {
-               kfree(mdev->tl_hash);
-               mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
-               mdev->tl_hash = new_tl_hash;
-       }
-
-       if (new_ee_hash) {
-               kfree(mdev->ee_hash);
-               mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
-               mdev->ee_hash = new_ee_hash;
-       }
+       conn_free_crypto(tconn);
+       tconn->cram_hmac_tfm = tfm;
+       tconn->integrity_w_tfm = integrity_w_tfm;
+       tconn->integrity_r_tfm = integrity_r_tfm;
+       tconn->int_dig_in = int_dig_in;
+       tconn->int_dig_vv = int_dig_vv;
 
-       crypto_free_hash(mdev->cram_hmac_tfm);
-       mdev->cram_hmac_tfm = tfm;
+       mutex_unlock(&tconn->net_conf_update);
 
-       crypto_free_hash(mdev->integrity_w_tfm);
-       mdev->integrity_w_tfm = integrity_w_tfm;
+       retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 
-       crypto_free_hash(mdev->integrity_r_tfm);
-       mdev->integrity_r_tfm = integrity_r_tfm;
-
-       kfree(mdev->int_dig_out);
-       kfree(mdev->int_dig_in);
-       kfree(mdev->int_dig_vv);
-       mdev->int_dig_out=int_dig_out;
-       mdev->int_dig_in=int_dig_in;
-       mdev->int_dig_vv=int_dig_vv;
-       retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
-       spin_unlock_irq(&mdev->req_lock);
-
-       kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
-       reply->ret_code = retcode;
-       drbd_reconfig_done(mdev);
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, i) {
+               mdev->send_cnt = 0;
+               mdev->recv_cnt = 0;
+               kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
+       }
+       rcu_read_unlock();
+       conn_reconfig_done(tconn);
+       drbd_adm_finish(info, retcode);
        return 0;
 
 fail:
-       kfree(int_dig_out);
        kfree(int_dig_in);
        kfree(int_dig_vv);
        crypto_free_hash(tfm);
        crypto_free_hash(integrity_w_tfm);
        crypto_free_hash(integrity_r_tfm);
-       kfree(new_tl_hash);
-       kfree(new_ee_hash);
        kfree(new_conf);
 
-       reply->ret_code = retcode;
-       drbd_reconfig_done(mdev);
+       conn_reconfig_done(tconn);
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                             struct drbd_nl_cfg_reply *reply)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
 {
-       int retcode;
-       struct disconnect dc;
-
-       memset(&dc, 0, sizeof(struct disconnect));
-       if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
-               retcode = ERR_MANDATORY_TAG;
-               goto fail;
+       enum drbd_state_rv rv;
+       if (force) {
+               spin_lock_irq(&tconn->req_lock);
+               if (tconn->cstate >= C_WF_CONNECTION)
+                       _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+               spin_unlock_irq(&tconn->req_lock);
+               return SS_SUCCESS;
+       }
+
+       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
+
+       switch (rv) {
+       case SS_NOTHING_TO_DO:
+       case SS_ALREADY_STANDALONE:
+               return SS_SUCCESS;
+       case SS_PRIMARY_NOP:
+               /* Our state checking code wants to see the peer outdated. */
+               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+                                                       pdsk, D_OUTDATED), CS_VERBOSE);
+               break;
+       case SS_CW_FAILED_BY_PEER:
+               /* The peer probably wants to see us outdated. */
+               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+                                                       disk, D_OUTDATED), 0);
+               if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
+                       conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+                       rv = SS_SUCCESS;
+               }
+               break;
+       default:;
+               /* no special handling necessary */
        }
 
-       if (dc.force) {
-               spin_lock_irq(&mdev->req_lock);
-               if (mdev->state.conn >= C_WF_CONNECTION)
-                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
-               spin_unlock_irq(&mdev->req_lock);
-               goto done;
-       }
+       return rv;
+}
 
-       retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+       struct disconnect_parms parms;
+       struct drbd_tconn *tconn;
+       enum drbd_state_rv rv;
+       enum drbd_ret_code retcode;
+       int err;
 
-       if (retcode == SS_NOTHING_TO_DO)
-               goto done;
-       else if (retcode == SS_ALREADY_STANDALONE)
-               goto done;
-       else if (retcode == SS_PRIMARY_NOP) {
-               /* Our statche checking code wants to see the peer outdated. */
-               retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
-                                                     pdsk, D_OUTDATED));
-       } else if (retcode == SS_CW_FAILED_BY_PEER) {
-               /* The peer probably wants to see us outdated. */
-               retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
-                                                       disk, D_OUTDATED),
-                                             CS_ORDERED);
-               if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
-                       drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-                       retcode = SS_SUCCESS;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       tconn = adm_ctx.tconn;
+       memset(&parms, 0, sizeof(parms));
+       if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
+               err = disconnect_parms_from_attrs(&parms, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto fail;
                }
        }
 
-       if (retcode < SS_SUCCESS)
+       rv = conn_try_disconnect(tconn, parms.force_disconnect);
+       if (rv < SS_SUCCESS)
                goto fail;
 
-       if (wait_event_interruptible(mdev->state_wait,
-                                    mdev->state.conn != C_DISCONNECTING)) {
+       if (wait_event_interruptible(tconn->ping_wait,
+                                    tconn->cstate != C_DISCONNECTING)) {
                /* Do not test for mdev->state.conn == C_STANDALONE, since
                   someone else might connect us in the mean time! */
                retcode = ERR_INTR;
                goto fail;
        }
 
- done:
        retcode = NO_ERROR;
  fail:
-       drbd_md_sync(mdev);
-       reply->ret_code = retcode;
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
@@ -1671,7 +2169,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
        if (mdev->state.role != mdev->state.peer)
                iass = (mdev->state.role == R_PRIMARY);
        else
-               iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+               iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
 
        if (iass)
                drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1679,20 +2177,32 @@ void resync_after_online_grow(struct drbd_conf *mdev)
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
 }
 
-static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                         struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 {
-       struct resize rs;
-       int retcode = NO_ERROR;
+       struct resize_parms rs;
+       struct drbd_conf *mdev;
+       enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        enum dds_flags ddsf;
+       int err;
 
-       memset(&rs, 0, sizeof(struct resize));
-       if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
-               retcode = ERR_MANDATORY_TAG;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
                goto fail;
+
+       memset(&rs, 0, sizeof(struct resize_parms));
+       if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+               err = resize_parms_from_attrs(&rs, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto fail;
+               }
        }
 
+       mdev = adm_ctx.mdev;
        if (mdev->state.conn > C_CONNECTED) {
                retcode = ERR_RESIZE_RESYNC;
                goto fail;
@@ -1709,7 +2219,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                goto fail;
        }
 
-       if (rs.no_resync && mdev->agreed_pro_version < 93) {
+       if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
                goto fail;
        }
@@ -1736,208 +2246,88 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
        }
 
  fail:
-       reply->ret_code = retcode;
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                              struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
 {
-       int retcode = NO_ERROR;
-       int err;
-       int ovr; /* online verify running */
-       int rsr; /* re-sync running */
-       struct crypto_hash *verify_tfm = NULL;
-       struct crypto_hash *csums_tfm = NULL;
-       struct syncer_conf sc;
+       enum drbd_ret_code retcode;
        cpumask_var_t new_cpu_mask;
+       struct drbd_tconn *tconn;
        int *rs_plan_s = NULL;
-       int fifo_size;
+       struct res_opts sc;
+       int err;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto fail;
+       tconn = adm_ctx.tconn;
 
        if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
                retcode = ERR_NOMEM;
+               drbd_msg_put_info("unable to allocate cpumask");
                goto fail;
        }
 
-       if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
-               memset(&sc, 0, sizeof(struct syncer_conf));
-               sc.rate       = DRBD_RATE_DEF;
-               sc.after      = DRBD_AFTER_DEF;
-               sc.al_extents = DRBD_AL_EXTENTS_DEF;
+       if (((struct drbd_genlmsghdr*)info->userhdr)->flags
+                       & DRBD_GENL_F_SET_DEFAULTS) {
+               memset(&sc, 0, sizeof(struct res_opts));
                sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
-               sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
-               sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
-               sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
-               sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
-               sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
        } else
-               memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
+               sc = tconn->res_opts;
 
-       if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
+       err = res_opts_from_attrs(&sc, info);
+       if (err) {
                retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       /* re-sync running */
-       rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
-               mdev->state.conn == C_SYNC_TARGET ||
-               mdev->state.conn == C_PAUSED_SYNC_S ||
-               mdev->state.conn == C_PAUSED_SYNC_T );
-
-       if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
-               retcode = ERR_CSUMS_RESYNC_RUNNING;
-               goto fail;
-       }
-
-       if (!rsr && sc.csums_alg[0]) {
-               csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(csums_tfm)) {
-                       csums_tfm = NULL;
-                       retcode = ERR_CSUMS_ALG;
-                       goto fail;
-               }
-
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
-                       retcode = ERR_CSUMS_ALG_ND;
-                       goto fail;
-               }
-       }
-
-       /* online verify running */
-       ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
-       if (ovr) {
-               if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
-                       retcode = ERR_VERIFY_RUNNING;
-                       goto fail;
-               }
-       }
-
-       if (!ovr && sc.verify_alg[0]) {
-               verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(verify_tfm)) {
-                       verify_tfm = NULL;
-                       retcode = ERR_VERIFY_ALG;
-                       goto fail;
-               }
-
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
-                       retcode = ERR_VERIFY_ALG_ND;
-                       goto fail;
-               }
-       }
-
-       /* silently ignore cpu mask on UP kernel */
-       if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
-               err = __bitmap_parse(sc.cpu_mask, 32, 0,
-                               cpumask_bits(new_cpu_mask), nr_cpu_ids);
-               if (err) {
-                       dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
-                       retcode = ERR_CPU_MASK_PARSE;
-                       goto fail;
-               }
-       }
-
-       ERR_IF (sc.rate < 1) sc.rate = 1;
-       ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
-#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
-       if (sc.al_extents > AL_MAX) {
-               dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
-               sc.al_extents = AL_MAX;
-       }
-#undef AL_MAX
-
-       /* to avoid spurious errors when configuring minors before configuring
-        * the minors they depend on: if necessary, first create the minor we
-        * depend on */
-       if (sc.after >= 0)
-               ensure_mdev(sc.after, 1);
-
-       /* most sanity checks done, try to assign the new sync-after
-        * dependency.  need to hold the global lock in there,
-        * to avoid a race in the dependency loop check. */
-       retcode = drbd_alter_sa(mdev, sc.after);
-       if (retcode != NO_ERROR)
-               goto fail;
-
-       fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-       if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
-               rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
-               if (!rs_plan_s) {
-                       dev_err(DEV, "kmalloc of fifo_buffer failed");
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-       }
-
-       /* ok, assign the rest of it as well.
-        * lock against receive_SyncParam() */
-       spin_lock(&mdev->peer_seq_lock);
-       mdev->sync_conf = sc;
-
-       if (!rsr) {
-               crypto_free_hash(mdev->csums_tfm);
-               mdev->csums_tfm = csums_tfm;
-               csums_tfm = NULL;
-       }
-
-       if (!ovr) {
-               crypto_free_hash(mdev->verify_tfm);
-               mdev->verify_tfm = verify_tfm;
-               verify_tfm = NULL;
-       }
-
-       if (fifo_size != mdev->rs_plan_s.size) {
-               kfree(mdev->rs_plan_s.values);
-               mdev->rs_plan_s.values = rs_plan_s;
-               mdev->rs_plan_s.size   = fifo_size;
-               mdev->rs_planed = 0;
-               rs_plan_s = NULL;
-       }
-
-       spin_unlock(&mdev->peer_seq_lock);
-
-       if (get_ldev(mdev)) {
-               wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-               drbd_al_shrink(mdev);
-               err = drbd_check_al_size(mdev);
-               lc_unlock(mdev->act_log);
-               wake_up(&mdev->al_wait);
-
-               put_ldev(mdev);
-               drbd_md_sync(mdev);
-
+       /* silently ignore cpu mask on UP kernel */
+       if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
+               err = __bitmap_parse(sc.cpu_mask, 32, 0,
+                               cpumask_bits(new_cpu_mask), nr_cpu_ids);
                if (err) {
-                       retcode = ERR_NOMEM;
+                       conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
+                       retcode = ERR_CPU_MASK_PARSE;
                        goto fail;
                }
        }
 
-       if (mdev->state.conn >= C_CONNECTED)
-               drbd_send_sync_param(mdev, &sc);
 
-       if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
-               cpumask_copy(mdev->cpu_mask, new_cpu_mask);
-               drbd_calc_cpu_mask(mdev);
-               mdev->receiver.reset_cpu_mask = 1;
-               mdev->asender.reset_cpu_mask = 1;
-               mdev->worker.reset_cpu_mask = 1;
+       tconn->res_opts = sc;
+
+       if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
+               cpumask_copy(tconn->cpu_mask, new_cpu_mask);
+               drbd_calc_cpu_mask(tconn);
+               tconn->receiver.reset_cpu_mask = 1;
+               tconn->asender.reset_cpu_mask = 1;
+               tconn->worker.reset_cpu_mask = 1;
        }
 
-       kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
        kfree(rs_plan_s);
        free_cpumask_var(new_cpu_mask);
-       crypto_free_hash(csums_tfm);
-       crypto_free_hash(verify_tfm);
-       reply->ret_code = retcode;
+
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                             struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 {
-       int retcode;
+       struct drbd_conf *mdev;
+       int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       mdev = adm_ctx.mdev;
 
        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync. */
@@ -1949,10 +2339,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
 
        while (retcode == SS_NEED_CONNECTION) {
-               spin_lock_irq(&mdev->req_lock);
+               spin_lock_irq(&mdev->tconn->req_lock);
                if (mdev->state.conn < C_CONNECTED)
                        retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
-               spin_unlock_irq(&mdev->req_lock);
+               spin_unlock_irq(&mdev->tconn->req_lock);
 
                if (retcode != SS_NEED_CONNECTION)
                        break;
@@ -1960,7 +2350,8 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }
 
-       reply->ret_code = retcode;
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
@@ -1973,56 +2364,58 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
        return rv;
 }
 
-static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                                  struct drbd_nl_cfg_reply *reply)
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+               union drbd_state mask, union drbd_state val)
 {
-       int retcode;
-
-       /* If there is still bitmap IO pending, probably because of a previous
-        * resync just being finished, wait for it before requesting a new resync. */
-       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       enum drbd_ret_code retcode;
 
-       retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-
-       if (retcode < SS_SUCCESS) {
-               if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
-                       /* The peer will get a resync upon connect anyways. Just make that
-                          into a full resync. */
-                       retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
-                       if (retcode >= SS_SUCCESS) {
-                               if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
-                                       "set_n_write from invalidate_peer",
-                                       BM_LOCKED_SET_ALLOWED))
-                                       retcode = ERR_IO_MD_DISK;
-                       }
-               } else
-                       retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
-       }
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       reply->ret_code = retcode;
+       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                             struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 {
-       int retcode = NO_ERROR;
+       return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
+}
 
-       if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
-               retcode = ERR_PAUSE_IS_SET;
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
 
-       reply->ret_code = retcode;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+               retcode = ERR_PAUSE_IS_SET;
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                              struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
 {
-       int retcode = NO_ERROR;
-       union drbd_state s;
+       union drbd_dev_state s;
+       enum drbd_ret_code retcode;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
-               s = mdev->state;
+       if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+               s = adm_ctx.mdev->state;
                if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
                        retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
                                  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2031,170 +2424,432 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
                }
        }
 
-       reply->ret_code = retcode;
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                             struct drbd_nl_cfg_reply *reply)
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
 {
-       reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
-
-       return 0;
+       return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
 }
 
-static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                            struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
 {
+       struct drbd_conf *mdev;
+       int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       mdev = adm_ctx.mdev;
        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
        }
        drbd_suspend_io(mdev);
-       reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
-       if (reply->ret_code == SS_SUCCESS) {
+       retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+       if (retcode == SS_SUCCESS) {
                if (mdev->state.conn < C_CONNECTED)
-                       tl_clear(mdev);
+                       tl_clear(mdev->tconn);
                if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
-                       tl_restart(mdev, fail_frozen_disk_io);
+                       tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
        }
        drbd_resume_io(mdev);
 
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                          struct drbd_nl_cfg_reply *reply)
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
 {
-       reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
-       return 0;
+       return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
 }
 
-static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                          struct drbd_nl_cfg_reply *reply)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
 {
-       unsigned short *tl;
+       struct nlattr *nla;
+       nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+       if (!nla)
+               goto nla_put_failure;
+       if (vnr != VOLUME_UNSPECIFIED)
+               NLA_PUT_U32(skb, T_ctx_volume, vnr);
+       NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
+       nla_nest_end(skb, nla);
+       return 0;
 
-       tl = reply->tag_list;
+nla_put_failure:
+       if (nla)
+               nla_nest_cancel(skb, nla);
+       return -EMSGSIZE;
+}
 
-       if (get_ldev(mdev)) {
-               tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
-               put_ldev(mdev);
+int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+               const struct sib_info *sib)
+{
+       struct state_info *si = NULL; /* for sizeof(si->member); */
+       struct net_conf *nc;
+       struct nlattr *nla;
+       int got_ldev;
+       int err = 0;
+       int exclude_sensitive;
+
+       /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
+        * to.  So we better exclude_sensitive information.
+        *
+        * If sib == NULL, this is drbd_adm_get_status, executed synchronously
+        * in the context of the requesting user process. Exclude sensitive
+        * information, unless current has superuser.
+        *
+        * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
+        * relies on the current implementation of netlink_dump(), which
+        * executes the dump callback successively from netlink_recvmsg(),
+        * always in the context of the receiving process */
+       exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
+
+       got_ldev = get_ldev(mdev);
+
+       /* We need to add connection name and volume number information still.
+        * Minor number is in drbd_genlmsghdr. */
+       if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
+               goto nla_put_failure;
+
+       if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+               goto nla_put_failure;
+
+       if (got_ldev)
+               if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
+                       goto nla_put_failure;
+
+       rcu_read_lock();
+       nc = rcu_dereference(mdev->tconn->net_conf);
+       if (nc)
+               err = net_conf_to_skb(skb, nc, exclude_sensitive);
+       rcu_read_unlock();
+       if (err)
+               goto nla_put_failure;
+
+       nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+       if (!nla)
+               goto nla_put_failure;
+       NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
+       NLA_PUT_U32(skb, T_current_state, mdev->state.i);
+       NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
+       NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
+
+       if (got_ldev) {
+               NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
+               NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+               NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
+               NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
+               if (C_SYNC_SOURCE <= mdev->state.conn &&
+                   C_PAUSED_SYNC_T >= mdev->state.conn) {
+                       NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
+                       NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
+               }
        }
 
-       if (get_net_conf(mdev)) {
-               tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
-               put_net_conf(mdev);
+       if (sib) {
+               switch(sib->sib_reason) {
+               case SIB_SYNC_PROGRESS:
+               case SIB_GET_STATUS_REPLY:
+                       break;
+               case SIB_STATE_CHANGE:
+                       NLA_PUT_U32(skb, T_prev_state, sib->os.i);
+                       NLA_PUT_U32(skb, T_new_state, sib->ns.i);
+                       break;
+               case SIB_HELPER_POST:
+                       NLA_PUT_U32(skb,
+                               T_helper_exit_code, sib->helper_exit_code);
+                       /* fall through */
+               case SIB_HELPER_PRE:
+                       NLA_PUT_STRING(skb, T_helper, sib->helper_name);
+                       break;
+               }
        }
-       tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
+       nla_nest_end(skb, nla);
 
-       put_unaligned(TT_END, tl++); /* Close the tag list */
-
-       return (int)((char *)tl - (char *)reply->tag_list);
+       if (0)
+nla_put_failure:
+               err = -EMSGSIZE;
+       if (got_ldev)
+               put_ldev(mdev);
+       return err;
 }
 
-static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                            struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
 {
-       unsigned short *tl = reply->tag_list;
-       union drbd_state s = mdev->state;
-       unsigned long rs_left;
-       unsigned int res;
+       enum drbd_ret_code retcode;
+       int err;
 
-       tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       /* no local ref, no bitmap, no syncer progress. */
-       if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
-               if (get_ldev(mdev)) {
-                       drbd_get_syncer_progress(mdev, &rs_left, &res);
-                       tl = tl_add_int(tl, T_sync_progress, &res);
-                       put_ldev(mdev);
-               }
+       err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+       if (err) {
+               nlmsg_free(adm_ctx.reply_skb);
+               return err;
        }
-       put_unaligned(TT_END, tl++); /* Close the tag list */
-
-       return (int)((char *)tl - (char *)reply->tag_list);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                            struct drbd_nl_cfg_reply *reply)
+int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       unsigned short *tl;
-
-       tl = reply->tag_list;
+       struct drbd_conf *mdev;
+       struct drbd_genlmsghdr *dh;
+       struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
+       struct drbd_tconn *tconn = NULL;
+       struct drbd_tconn *tmp;
+       unsigned volume = cb->args[1];
+
+       /* Open coded, deferred, iteration:
+        * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+        *      idr_for_each_entry(&tconn->volumes, mdev, i) {
+        *        ...
+        *      }
+        * }
+        * where tconn is cb->args[0];
+        * and i is cb->args[1];
+        *
+        * cb->args[2] indicates if we shall loop over all resources,
+        * or just dump all volumes of a single resource.
+        *
+        * This may miss entries inserted after this dump started,
+        * or entries deleted before they are reached.
+        *
+        * We need to make sure the mdev won't disappear while
+        * we are looking at it, and revalidate our iterators
+        * on each iteration.
+        */
 
-       if (get_ldev(mdev)) {
-               tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
-               tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
-               put_ldev(mdev);
+       /* synchronize with conn_create()/conn_destroy() */
+       down_read(&drbd_cfg_rwsem);
+       /* revalidate iterator position */
+       list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
+               if (pos == NULL) {
+                       /* first iteration */
+                       pos = tmp;
+                       tconn = pos;
+                       break;
+               }
+               if (tmp == pos) {
+                       tconn = pos;
+                       break;
+               }
        }
-       put_unaligned(TT_END, tl++); /* Close the tag list */
+       if (tconn) {
+next_tconn:
+               mdev = idr_get_next(&tconn->volumes, &volume);
+               if (!mdev) {
+                       /* No more volumes to dump on this tconn.
+                        * Advance tconn iterator. */
+                       pos = list_entry(tconn->all_tconn.next,
+                                       struct drbd_tconn, all_tconn);
+                       /* Did we dump any volume on this tconn yet? */
+                       if (volume != 0) {
+                               /* If we reached the end of the list,
+                                * or only a single resource dump was requested,
+                                * we are done. */
+                               if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+                                       goto out;
+                               volume = 0;
+                               tconn = pos;
+                               goto next_tconn;
+                       }
+               }
+
+               dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
+                               cb->nlh->nlmsg_seq, &drbd_genl_family,
+                               NLM_F_MULTI, DRBD_ADM_GET_STATUS);
+               if (!dh)
+                       goto out;
+
+               if (!mdev) {
+                       /* this is a tconn without a single volume */
+                       dh->minor = -1U;
+                       dh->ret_code = NO_ERROR;
+                       if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
+                               genlmsg_cancel(skb, dh);
+                       else
+                               genlmsg_end(skb, dh);
+                       goto out;
+               }
+
+               D_ASSERT(mdev->vnr == volume);
+               D_ASSERT(mdev->tconn == tconn);
+
+               dh->minor = mdev_to_minor(mdev);
+               dh->ret_code = NO_ERROR;
 
-       return (int)((char *)tl - (char *)reply->tag_list);
+               if (nla_put_status_info(skb, mdev, NULL)) {
+                       genlmsg_cancel(skb, dh);
+                       goto out;
+               }
+               genlmsg_end(skb, dh);
+        }
+
+out:
+       up_read(&drbd_cfg_rwsem);
+       /* where to start the next iteration */
+        cb->args[0] = (long)pos;
+        cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+
+       /* No more tconns/volumes/minors found results in an empty skb.
+        * Which will terminate the dump. */
+        return skb->len;
 }
 
-/**
- * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
- * @mdev:      DRBD device.
- * @nlp:       Netlink/connector packet from drbdsetup
- * @reply:     Reply packet for drbdsetup
+/*
+ * Request status of all resources, or of all volumes within a single resource.
+ *
+ * This is a dump, as the answer may not fit in a single reply skb otherwise.
+ * Which means we cannot use the family->attrbuf or other such members, because
+ * dump is NOT protected by the genl_lock().  During dump, we only have access
+ * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
+ *
+ * Once things are setup properly, we call into get_one_status().
  */
-static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                                   struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       unsigned short *tl;
-       char rv;
+       const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
+       struct nlattr *nla;
+       const char *conn_name;
+       struct drbd_tconn *tconn;
+
+       /* Is this a followup call? */
+       if (cb->args[0]) {
+               /* ... of a single resource dump,
+                * and the resource iterator has been advanced already? */
+               if (cb->args[2] && cb->args[2] != cb->args[0])
+                       return 0; /* DONE. */
+               goto dump;
+       }
+
+       /* First call (from netlink_dump_start).  We need to figure out
+        * which resource(s) the user wants us to dump. */
+       nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
+                       nlmsg_attrlen(cb->nlh, hdrlen),
+                       DRBD_NLA_CFG_CONTEXT);
+
+       /* No explicit context given.  Dump all. */
+       if (!nla)
+               goto dump;
+       nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
+       /* context given, but no name present? */
+       if (!nla)
+               return -EINVAL;
+       conn_name = nla_data(nla);
+       tconn = conn_by_name(conn_name);
+       if (!tconn)
+               return -ENODEV;
+
+       /* prime iterators, and set "filter" mode mark:
+        * only dump this tconn. */
+       cb->args[0] = (long)tconn;
+       /* cb->args[1] = 0; passed in this way. */
+       cb->args[2] = (long)tconn;
+
+dump:
+       return get_one_status(skb, cb);
+}
 
-       tl = reply->tag_list;
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct timeout_parms tp;
+       int err;
 
-       rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
-         test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
-       put_unaligned(TT_END, tl++); /* Close the tag list */
+       tp.timeout_type =
+               adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+               test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+               UT_DEFAULT;
 
-       return (int)((char *)tl - (char *)reply->tag_list);
+       err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
+       if (err) {
+               nlmsg_free(adm_ctx.reply_skb);
+               return err;
+       }
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                                   struct drbd_nl_cfg_reply *reply)
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
 {
-       /* default to resume from last known position, if possible */
-       struct start_ov args =
-               { .start_sector = mdev->ov_start_sector };
+       struct drbd_conf *mdev;
+       enum drbd_ret_code retcode;
 
-       if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
-               reply->ret_code = ERR_MANDATORY_TAG;
-               return 0;
-       }
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
+       mdev = adm_ctx.mdev;
+       if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
+               /* resume from last known position, if possible */
+               struct start_ov_parms parms =
+                       { .ov_start_sector = mdev->ov_start_sector };
+               int err = start_ov_parms_from_attrs(&parms, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto out;
+               }
+               /* w_make_ov_request expects position to be aligned */
+               mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
+       }
        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-
-       /* w_make_ov_request expects position to be aligned */
-       mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
-       reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+       retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
 
-static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-                             struct drbd_nl_cfg_reply *reply)
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
 {
-       int retcode = NO_ERROR;
+       struct drbd_conf *mdev;
+       enum drbd_ret_code retcode;
        int skip_initial_sync = 0;
        int err;
+       struct new_c_uuid_parms args;
 
-       struct new_c_uuid args;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out_nolock;
 
-       memset(&args, 0, sizeof(struct new_c_uuid));
-       if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
-               reply->ret_code = ERR_MANDATORY_TAG;
-               return 0;
+       mdev = adm_ctx.mdev;
+       memset(&args, 0, sizeof(args));
+       if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
+               err = new_c_uuid_parms_from_attrs(&args, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto out_nolock;
+               }
        }
 
-       mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+       mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
 
        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
@@ -2202,7 +2857,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
        }
 
        /* this is "skip initial sync", assume to be clean */
-       if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
+       if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
            mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
                dev_info(DEV, "Preparing to skip initial sync\n");
                skip_initial_sync = 1;
@@ -2225,10 +2880,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
                        drbd_send_uuids_skip_initial_sync(mdev);
                        _drbd_uuid_set(mdev, UI_BITMAP, 0);
                        drbd_print_uuids(mdev, "cleared bitmap UUID");
-                       spin_lock_irq(&mdev->req_lock);
+                       spin_lock_irq(&mdev->tconn->req_lock);
                        _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
-                       spin_unlock_irq(&mdev->req_lock);
+                       spin_unlock_irq(&mdev->tconn->req_lock);
                }
        }
 
@@ -2236,416 +2891,272 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 out_dec:
        put_ldev(mdev);
 out:
-       mutex_unlock(&mdev->state_mutex);
-
-       reply->ret_code = retcode;
+       mutex_unlock(mdev->state_mutex);
+out_nolock:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-struct cn_handler_struct {
-       int (*function)(struct drbd_conf *,
-                        struct drbd_nl_cfg_req *,
-                        struct drbd_nl_cfg_reply *);
-       int reply_body_size;
-};
-
-static struct cn_handler_struct cnd_table[] = {
-       [ P_primary ]           = { &drbd_nl_primary,           0 },
-       [ P_secondary ]         = { &drbd_nl_secondary,         0 },
-       [ P_disk_conf ]         = { &drbd_nl_disk_conf,         0 },
-       [ P_detach ]            = { &drbd_nl_detach,            0 },
-       [ P_net_conf ]          = { &drbd_nl_net_conf,          0 },
-       [ P_disconnect ]        = { &drbd_nl_disconnect,        0 },
-       [ P_resize ]            = { &drbd_nl_resize,            0 },
-       [ P_syncer_conf ]       = { &drbd_nl_syncer_conf,       0 },
-       [ P_invalidate ]        = { &drbd_nl_invalidate,        0 },
-       [ P_invalidate_peer ]   = { &drbd_nl_invalidate_peer,   0 },
-       [ P_pause_sync ]        = { &drbd_nl_pause_sync,        0 },
-       [ P_resume_sync ]       = { &drbd_nl_resume_sync,       0 },
-       [ P_suspend_io ]        = { &drbd_nl_suspend_io,        0 },
-       [ P_resume_io ]         = { &drbd_nl_resume_io,         0 },
-       [ P_outdate ]           = { &drbd_nl_outdate,           0 },
-       [ P_get_config ]        = { &drbd_nl_get_config,
-                                   sizeof(struct syncer_conf_tag_len_struct) +
-                                   sizeof(struct disk_conf_tag_len_struct) +
-                                   sizeof(struct net_conf_tag_len_struct) },
-       [ P_get_state ]         = { &drbd_nl_get_state,
-                                   sizeof(struct get_state_tag_len_struct) +
-                                   sizeof(struct sync_progress_tag_len_struct) },
-       [ P_get_uuids ]         = { &drbd_nl_get_uuids,
-                                   sizeof(struct get_uuids_tag_len_struct) },
-       [ P_get_timeout_flag ]  = { &drbd_nl_get_timeout_flag,
-                                   sizeof(struct get_timeout_flag_tag_len_struct)},
-       [ P_start_ov ]          = { &drbd_nl_start_ov,          0 },
-       [ P_new_c_uuid ]        = { &drbd_nl_new_c_uuid,        0 },
-};
-
-static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
-{
-       struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
-       struct cn_handler_struct *cm;
-       struct cn_msg *cn_reply;
-       struct drbd_nl_cfg_reply *reply;
-       struct drbd_conf *mdev;
-       int retcode, rr;
-       int reply_size = sizeof(struct cn_msg)
-               + sizeof(struct drbd_nl_cfg_reply)
-               + sizeof(short int);
-
-       if (!try_module_get(THIS_MODULE)) {
-               printk(KERN_ERR "drbd: try_module_get() failed!\n");
-               return;
+static enum drbd_ret_code
+drbd_check_conn_name(const char *name)
+{
+       if (!name || !name[0]) {
+               drbd_msg_put_info("connection name missing");
+               return ERR_MANDATORY_TAG;
        }
-
-       if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
-               retcode = ERR_PERM;
-               goto fail;
+       /* if we want to use these in sysfs/configfs/debugfs some day,
+        * we must not allow slashes */
+       if (strchr(name, '/')) {
+               drbd_msg_put_info("invalid connection name");
+               return ERR_INVALID_REQUEST;
        }
+       return NO_ERROR;
+}
 
-       mdev = ensure_mdev(nlp->drbd_minor,
-                       (nlp->flags & DRBD_NL_CREATE_DEVICE));
-       if (!mdev) {
-               retcode = ERR_MINOR_INVALID;
-               goto fail;
-       }
+int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
 
-       if (nlp->packet_type >= P_nl_after_last_packet ||
-           nlp->packet_type == P_return_code_only) {
-               retcode = ERR_PACKET_NR;
-               goto fail;
-       }
+       retcode = drbd_adm_prepare(skb, info, 0);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       cm = cnd_table + nlp->packet_type;
+       retcode = drbd_check_conn_name(adm_ctx.conn_name);
+       if (retcode != NO_ERROR)
+               goto out;
 
-       /* This may happen if packet number is 0: */
-       if (cm->function == NULL) {
-               retcode = ERR_PACKET_NR;
-               goto fail;
+       if (adm_ctx.tconn) {
+               if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
+                       retcode = ERR_INVALID_REQUEST;
+                       drbd_msg_put_info("connection exists");
+               }
+               /* else: still NO_ERROR */
+               goto out;
        }
 
-       reply_size += cm->reply_body_size;
-
-       /* allocation not in the IO path, cqueue thread context */
-       cn_reply = kzalloc(reply_size, GFP_KERNEL);
-       if (!cn_reply) {
+       if (!conn_create(adm_ctx.conn_name))
                retcode = ERR_NOMEM;
-               goto fail;
-       }
-       reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
-
-       reply->packet_type =
-               cm->reply_body_size ? nlp->packet_type : P_return_code_only;
-       reply->minor = nlp->drbd_minor;
-       reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
-       /* reply->tag_list; might be modified by cm->function. */
-
-       rr = cm->function(mdev, nlp, reply);
-
-       cn_reply->id = req->id;
-       cn_reply->seq = req->seq;
-       cn_reply->ack = req->ack  + 1;
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
-       cn_reply->flags = 0;
-
-       rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
-       if (rr && rr != -ESRCH)
-               printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
-
-       kfree(cn_reply);
-       module_put(THIS_MODULE);
-       return;
- fail:
-       drbd_nl_send_reply(req, retcode);
-       module_put(THIS_MODULE);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-
-static unsigned short *
-__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
-       unsigned short len, int nul_terminated)
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
 {
-       unsigned short l = tag_descriptions[tag_number(tag)].max_len;
-       len = (len < l) ? len :  l;
-       put_unaligned(tag, tl++);
-       put_unaligned(len, tl++);
-       memcpy(tl, data, len);
-       tl = (unsigned short*)((char*)tl + len);
-       if (nul_terminated)
-               *((char*)tl - 1) = 0;
-       return tl;
-}
+       struct drbd_genlmsghdr *dh = info->userhdr;
+       enum drbd_ret_code retcode;
 
-static unsigned short *
-tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
-{
-       return __tl_add_blob(tl, tag, data, len, 0);
-}
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-static unsigned short *
-tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
-{
-       return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
-}
+       /* FIXME drop minor_count parameter, limit to MINORMASK */
+       if (dh->minor >= minor_count) {
+               drbd_msg_put_info("requested minor out of range");
+               retcode = ERR_INVALID_REQUEST;
+               goto out;
+       }
+       if (adm_ctx.volume > DRBD_VOLUME_MAX) {
+               drbd_msg_put_info("requested volume id out of range");
+               retcode = ERR_INVALID_REQUEST;
+               goto out;
+       }
 
-static unsigned short *
-tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
-{
-       put_unaligned(tag, tl++);
-       switch(tag_type(tag)) {
-       case TT_INTEGER:
-               put_unaligned(sizeof(int), tl++);
-               put_unaligned(*(int *)val, (int *)tl);
-               tl = (unsigned short*)((char*)tl+sizeof(int));
-               break;
-       case TT_INT64:
-               put_unaligned(sizeof(u64), tl++);
-               put_unaligned(*(u64 *)val, (u64 *)tl);
-               tl = (unsigned short*)((char*)tl+sizeof(u64));
-               break;
-       default:
-               /* someone did something stupid. */
-               ;
+       /* drbd_adm_prepare made sure already
+        * that mdev->tconn and mdev->vnr match the request. */
+       if (adm_ctx.mdev) {
+               if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+                       retcode = ERR_MINOR_EXISTS;
+               /* else: still NO_ERROR */
+               goto out;
        }
-       return tl;
+
+       down_write(&drbd_cfg_rwsem);
+       retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+       up_write(&drbd_cfg_rwsem);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
+static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
 {
-       char buffer[sizeof(struct cn_msg)+
-                   sizeof(struct drbd_nl_cfg_reply)+
-                   sizeof(struct get_state_tag_len_struct)+
-                   sizeof(short int)];
-       struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-       struct drbd_nl_cfg_reply *reply =
-               (struct drbd_nl_cfg_reply *)cn_reply->data;
-       unsigned short *tl = reply->tag_list;
-
-       /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
-       tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
-
-       put_unaligned(TT_END, tl++); /* Close the tag list */
-
-       cn_reply->id.idx = CN_IDX_DRBD;
-       cn_reply->id.val = CN_VAL_DRBD;
-
-       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-       cn_reply->ack = 0; /* not used here. */
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
-               (int)((char *)tl - (char *)reply->tag_list);
-       cn_reply->flags = 0;
-
-       reply->packet_type = P_get_state;
-       reply->minor = mdev_to_minor(mdev);
-       reply->ret_code = NO_ERROR;
-
-       cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+       if (mdev->state.disk == D_DISKLESS &&
+           /* no need to be mdev->state.conn == C_STANDALONE &&
+            * we may want to delete a minor from a live replication group.
+            */
+           mdev->state.role == R_SECONDARY) {
+               drbd_delete_device(mdev);
+               return NO_ERROR;
+       } else
+               return ERR_MINOR_CONFIGURED;
 }
 
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
 {
-       char buffer[sizeof(struct cn_msg)+
-                   sizeof(struct drbd_nl_cfg_reply)+
-                   sizeof(struct call_helper_tag_len_struct)+
-                   sizeof(short int)];
-       struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-       struct drbd_nl_cfg_reply *reply =
-               (struct drbd_nl_cfg_reply *)cn_reply->data;
-       unsigned short *tl = reply->tag_list;
-
-       /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
-       tl = tl_add_str(tl, T_helper, helper_name);
-       put_unaligned(TT_END, tl++); /* Close the tag list */
-
-       cn_reply->id.idx = CN_IDX_DRBD;
-       cn_reply->id.val = CN_VAL_DRBD;
-
-       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-       cn_reply->ack = 0; /* not used here. */
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
-               (int)((char *)tl - (char *)reply->tag_list);
-       cn_reply->flags = 0;
+       enum drbd_ret_code retcode;
 
-       reply->packet_type = P_call_helper;
-       reply->minor = mdev_to_minor(mdev);
-       reply->ret_code = NO_ERROR;
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+       down_write(&drbd_cfg_rwsem);
+       retcode = adm_delete_minor(adm_ctx.mdev);
+       up_write(&drbd_cfg_rwsem);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-void drbd_bcast_ee(struct drbd_conf *mdev,
-               const char *reason, const int dgs,
-               const char* seen_hash, const char* calc_hash,
-               const struct drbd_epoch_entry* e)
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 {
-       struct cn_msg *cn_reply;
-       struct drbd_nl_cfg_reply *reply;
-       unsigned short *tl;
-       struct page *page;
-       unsigned len;
+       enum drbd_ret_code retcode;
+       enum drbd_state_rv rv;
+       struct drbd_conf *mdev;
+       unsigned i;
 
-       if (!e)
-               return;
-       if (!reason || !reason[0])
-               return;
+       retcode = drbd_adm_prepare(skb, info, 0);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       /* apparently we have to memcpy twice, first to prepare the data for the
-        * struct cn_msg, then within cn_netlink_send from the cn_msg to the
-        * netlink skb. */
-       /* receiver thread context, which is not in the writeout path (of this node),
-        * but may be in the writeout path of the _other_ node.
-        * GFP_NOIO to avoid potential "distributed deadlock". */
-       cn_reply = kzalloc(
-               sizeof(struct cn_msg)+
-               sizeof(struct drbd_nl_cfg_reply)+
-               sizeof(struct dump_ee_tag_len_struct)+
-               sizeof(short int),
-               GFP_NOIO);
-
-       if (!cn_reply) {
-               dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
-                               (unsigned long long)e->sector, e->size);
-               return;
+       if (!adm_ctx.tconn) {
+               retcode = ERR_CONN_NOT_KNOWN;
+               goto out;
        }
 
-       reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
-       tl = reply->tag_list;
-
-       tl = tl_add_str(tl, T_dump_ee_reason, reason);
-       tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
-       tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
-       tl = tl_add_int(tl, T_ee_sector, &e->sector);
-       tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
-
-       /* dump the first 32k */
-       len = min_t(unsigned, e->size, 32 << 10);
-       put_unaligned(T_ee_data, tl++);
-       put_unaligned(len, tl++);
-
-       page = e->pages;
-       page_chain_for_each(page) {
-               void *d = kmap_atomic(page, KM_USER0);
-               unsigned l = min_t(unsigned, len, PAGE_SIZE);
-               memcpy(tl, d, l);
-               kunmap_atomic(d, KM_USER0);
-               tl = (unsigned short*)((char*)tl + l);
-               len -= l;
-               if (len == 0)
-                       break;
+       down_read(&drbd_cfg_rwsem);
+       /* demote */
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+               if (retcode < SS_SUCCESS) {
+                       drbd_msg_put_info("failed to demote");
+                       goto out_unlock;
+               }
        }
-       put_unaligned(TT_END, tl++); /* Close the tag list */
-
-       cn_reply->id.idx = CN_IDX_DRBD;
-       cn_reply->id.val = CN_VAL_DRBD;
-
-       cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
-       cn_reply->ack = 0; // not used here.
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
-               (int)((char*)tl - (char*)reply->tag_list);
-       cn_reply->flags = 0;
-
-       reply->packet_type = P_dump_ee;
-       reply->minor = mdev_to_minor(mdev);
-       reply->ret_code = NO_ERROR;
-
-       cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
-       kfree(cn_reply);
-}
 
-void drbd_bcast_sync_progress(struct drbd_conf *mdev)
-{
-       char buffer[sizeof(struct cn_msg)+
-                   sizeof(struct drbd_nl_cfg_reply)+
-                   sizeof(struct sync_progress_tag_len_struct)+
-                   sizeof(short int)];
-       struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-       struct drbd_nl_cfg_reply *reply =
-               (struct drbd_nl_cfg_reply *)cn_reply->data;
-       unsigned short *tl = reply->tag_list;
-       unsigned long rs_left;
-       unsigned int res;
+       /* disconnect */
+       rv = conn_try_disconnect(adm_ctx.tconn, 0);
+       if (rv < SS_SUCCESS) {
+               retcode = rv; /* enum type mismatch! */
+               drbd_msg_put_info("failed to disconnect");
+               goto out_unlock;
+       }
 
-       /* no local ref, no bitmap, no syncer progress, no broadcast. */
-       if (!get_ldev(mdev))
-               return;
-       drbd_get_syncer_progress(mdev, &rs_left, &res);
-       put_ldev(mdev);
+       /* detach */
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               rv = adm_detach(mdev);
+               if (rv < SS_SUCCESS) {
+                       retcode = rv; /* enum type mismatch! */
+                       drbd_msg_put_info("failed to detach");
+                       goto out_unlock;
+               }
+       }
+       up_read(&drbd_cfg_rwsem);
 
-       tl = tl_add_int(tl, T_sync_progress, &res);
-       put_unaligned(TT_END, tl++); /* Close the tag list */
+       /* delete volumes */
+       down_write(&drbd_cfg_rwsem);
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               retcode = adm_delete_minor(mdev);
+               if (retcode != NO_ERROR) {
+                       /* "can not happen" */
+                       drbd_msg_put_info("failed to delete volume");
+                       up_write(&drbd_cfg_rwsem);
+                       goto out;
+               }
+       }
 
-       cn_reply->id.idx = CN_IDX_DRBD;
-       cn_reply->id.val = CN_VAL_DRBD;
+       /* stop all threads */
+       conn_reconfig_done(adm_ctx.tconn);
 
-       cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-       cn_reply->ack = 0; /* not used here. */
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
-               (int)((char *)tl - (char *)reply->tag_list);
-       cn_reply->flags = 0;
+       /* delete connection */
+       if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+               list_del(&adm_ctx.tconn->all_tconn);
+               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
 
-       reply->packet_type = P_sync_progress;
-       reply->minor = mdev_to_minor(mdev);
-       reply->ret_code = NO_ERROR;
+               retcode = NO_ERROR;
+       } else {
+               /* "can not happen" */
+               retcode = ERR_CONN_IN_USE;
+               drbd_msg_put_info("failed to delete connection");
+       }
 
-       cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+       up_write(&drbd_cfg_rwsem);
+       goto out;
+out_unlock:
+       up_read(&drbd_cfg_rwsem);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
 }
 
-int __init drbd_nl_init(void)
+int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
 {
-       static struct cb_id cn_id_drbd;
-       int err, try=10;
+       enum drbd_ret_code retcode;
 
-       cn_id_drbd.val = CN_VAL_DRBD;
-       do {
-               cn_id_drbd.idx = cn_idx;
-               err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
-               if (!err)
-                       break;
-               cn_idx = (cn_idx + CN_IDX_STEP);
-       } while (try--);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
-       if (err) {
-               printk(KERN_ERR "drbd: cn_drbd failed to register\n");
-               return err;
+       down_write(&drbd_cfg_rwsem);
+       if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+               list_del(&adm_ctx.tconn->all_tconn);
+               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
+               retcode = NO_ERROR;
+       } else {
+               retcode = ERR_CONN_IN_USE;
        }
+       up_write(&drbd_cfg_rwsem);
 
+out:
+       drbd_adm_finish(info, retcode);
        return 0;
 }
 
-void drbd_nl_cleanup(void)
-{
-       static struct cb_id cn_id_drbd;
-
-       cn_id_drbd.idx = cn_idx;
-       cn_id_drbd.val = CN_VAL_DRBD;
-
-       cn_del_callback(&cn_id_drbd);
-}
-
-void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
 {
-       char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
-       struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-       struct drbd_nl_cfg_reply *reply =
-               (struct drbd_nl_cfg_reply *)cn_reply->data;
-       int rr;
+       static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+       struct sk_buff *msg;
+       struct drbd_genlmsghdr *d_out;
+       unsigned seq;
+       int err = -ENOMEM;
+
+       seq = atomic_inc_return(&drbd_genl_seq);
+       msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+       if (!msg)
+               goto failed;
+
+       err = -EMSGSIZE;
+       d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
+       if (!d_out) /* cannot happen, but anyways. */
+               goto nla_put_failure;
+       d_out->minor = mdev_to_minor(mdev);
+       d_out->ret_code = 0;
+
+       if (nla_put_status_info(msg, mdev, sib))
+               goto nla_put_failure;
+       genlmsg_end(msg, d_out);
+       err = drbd_genl_multicast_events(msg, 0);
+       /* msg has been consumed or freed in netlink_broadcast() */
+       if (err && err != -ESRCH)
+               goto failed;
 
-       memset(buffer, 0, sizeof(buffer));
-       cn_reply->id = req->id;
-
-       cn_reply->seq = req->seq;
-       cn_reply->ack = req->ack  + 1;
-       cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
-       cn_reply->flags = 0;
-
-       reply->packet_type = P_return_code_only;
-       reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
-       reply->ret_code = ret_code;
+       return;
 
-       rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
-       if (rr && rr != -ESRCH)
-               printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+nla_put_failure:
+       nlmsg_free(msg);
+failed:
+       dev_err(DEV, "Error %d while broadcasting event. "
+                       "Event seq:%u sib_reason:%u\n",
+                       err, seq, sib->sib_reason);
 }
-