1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52
53 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
54 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
71 /* .dumpit */
72 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
73
74 #include <linux/drbd_genl_api.h>
75 #include <linux/genl_magic_func.h>
76
77 /* holder string for blkdev_get_by_path, to claim our meta data device(s) */
78 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
79
80 /* Configuration is strictly serialized, because generic netlink message
81  * processing is strictly serialized by the genl_lock(),
82  * which means we can use one static global drbd_config_context struct.
83  */
84 static struct drbd_config_context {
85         /* assigned from drbd_genlmsghdr */
86         unsigned int minor;
87         /* assigned from request attributes, if present */
88         unsigned int volume;
89 #define VOLUME_UNSPECIFIED              (-1U)
90         /* pointer into the request skb,
91          * limited lifetime! */
92         char *conn_name;
93
94         /* reply buffer */
95         struct sk_buff *reply_skb;
96         /* pointer into reply buffer */
97         struct drbd_genlmsghdr *reply_dh;
98         /* resolved from attributes, if possible */
99         struct drbd_conf *mdev;
100         struct drbd_tconn *tconn;
101 } adm_ctx;
102
103 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
104 {
105         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
106         if (genlmsg_reply(skb, info))
107                 printk(KERN_ERR "drbd: error sending genl reply\n");
108 }
109
110 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
111  * reason it could fail would be no space in the skb, and there are 4k available. */
112 int drbd_msg_put_info(const char *info)
113 {
114         struct sk_buff *skb = adm_ctx.reply_skb;
115         struct nlattr *nla;
116         int err = -EMSGSIZE;
117
118         if (!info || !info[0])
119                 return 0;
120
121         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
122         if (!nla)
123                 return err;
124
125         err = nla_put_string(skb, T_info_text, info);
126         if (err) {
127                 nla_nest_cancel(skb, nla);
128                 return err;
129         } else
130                 nla_nest_end(skb, nla);
131         return 0;
132 }
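
/* For illustration: callers pass a short, human readable string
 * (e.g. "unknown minor" in drbd_adm_prepare() below); it ends up as the
 * T_info_text attribute nested inside DRBD_NLA_CFG_REPLY of the reply skb. */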
133
134 /* This would be a good candidate for a "pre_doit" hook,
135  * and per-family private info->pointers.
136  * But we need to stay compatible with older kernels.
137  * If it returns successfully, adm_ctx members are valid.
138  */
139 #define DRBD_ADM_NEED_MINOR     1
140 #define DRBD_ADM_NEED_CONN      2
141 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
142                 unsigned flags)
143 {
144         struct drbd_genlmsghdr *d_in = info->userhdr;
145         const u8 cmd = info->genlhdr->cmd;
146         int err;
147
148         memset(&adm_ctx, 0, sizeof(adm_ctx));
149
150         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
151         if (cmd != DRBD_ADM_GET_STATUS
152         && security_netlink_recv(skb, CAP_SYS_ADMIN))
153                return -EPERM;
154
155         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
156         if (!adm_ctx.reply_skb)
157                 goto fail;
158
159         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
160                                         info, &drbd_genl_family, 0, cmd);
161         /* putting a few bytes into a fresh skb of >= 4k will always succeed,
162          * but check anyway */
163         if (!adm_ctx.reply_dh)
164                 goto fail;
165
166         adm_ctx.reply_dh->minor = d_in->minor;
167         adm_ctx.reply_dh->ret_code = NO_ERROR;
168
169         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
170                 struct nlattr *nla;
171                 /* parse and validate only */
172                 err = drbd_cfg_context_from_attrs(NULL, info->attrs);
173                 if (err)
174                         goto fail;
175
176                 /* It was present, and valid,
177                  * copy it over to the reply skb. */
178                 err = nla_put_nohdr(adm_ctx.reply_skb,
179                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
180                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
181                 if (err)
182                         goto fail;
183
184                 /* and assign stuff to the global adm_ctx */
185                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
186                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
187                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
188                 if (nla)
189                         adm_ctx.conn_name = nla_data(nla);
190         } else
191                 adm_ctx.volume = VOLUME_UNSPECIFIED;
192
193         adm_ctx.minor = d_in->minor;
194         adm_ctx.mdev = minor_to_mdev(d_in->minor);
195         adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
196
197         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
198                 drbd_msg_put_info("unknown minor");
199                 return ERR_MINOR_INVALID;
200         }
201         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
202                 drbd_msg_put_info("unknown connection");
203                 return ERR_INVALID_REQUEST;
204         }
205
206         /* some more paranoia, if the request was over-determined */
207         if (adm_ctx.mdev && adm_ctx.tconn &&
208             adm_ctx.mdev->tconn != adm_ctx.tconn) {
209                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
210                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
211                 drbd_msg_put_info("minor exists in different connection");
212                 return ERR_INVALID_REQUEST;
213         }
214         if (adm_ctx.mdev &&
215             adm_ctx.volume != VOLUME_UNSPECIFIED &&
216             adm_ctx.volume != adm_ctx.mdev->vnr) {
217                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
218                                 adm_ctx.minor, adm_ctx.volume,
219                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
220                 drbd_msg_put_info("minor exists as different volume");
221                 return ERR_INVALID_REQUEST;
222         }
223         if (adm_ctx.mdev && !adm_ctx.tconn)
224                 adm_ctx.tconn = adm_ctx.mdev->tconn;
225         return NO_ERROR;
226
227 fail:
228         nlmsg_free(adm_ctx.reply_skb);
229         adm_ctx.reply_skb = NULL;
230         return -ENOMEM;
231 }
232
233 static int drbd_adm_finish(struct genl_info *info, int retcode)
234 {
235         struct nlattr *nla;
236         const char *conn_name = NULL;
237
238         if (!adm_ctx.reply_skb)
239                 return -ENOMEM;
240
241         adm_ctx.reply_dh->ret_code = retcode;
242
243         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
244         if (nla) {
245                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
246                 if (nla)
247                         conn_name = nla_data(nla);
248         }
249
250         drbd_adm_send_reply(adm_ctx.reply_skb, info);
251         return 0;
252 }
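
/* A sketch of the calling convention shared by the DRBD_ADM_* .doit handlers
 * further down (see e.g. drbd_adm_set_role() or drbd_adm_detach()):
 *
 *      retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *      if (!adm_ctx.reply_skb)
 *              return retcode;
 *      if (retcode != NO_ERROR)
 *              goto out;
 *      ... work on adm_ctx.mdev / adm_ctx.tconn ...
 * out:
 *      drbd_adm_finish(info, retcode);
 *      return 0;
 */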
253
254 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
255 {
256         char *envp[] = { "HOME=/",
257                         "TERM=linux",
258                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
259                         NULL, /* Will be set to address family */
260                         NULL, /* Will be set to address */
261                         NULL };
262         char mb[12], af[20], ad[60], *afs;
263         char *argv[] = {usermode_helper, cmd, mb, NULL };
264         struct sib_info sib;
265         int ret;
266
267         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
268
269         if (get_net_conf(mdev->tconn)) {
270                 switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
271                 case AF_INET6:
272                         afs = "ipv6";
273                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
274                                  &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
275                         break;
276                 case AF_INET:
277                         afs = "ipv4";
278                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
279                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
280                         break;
281                 default:
282                         afs = "ssocks";
283                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
284                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
285                 }
286                 snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
287                 envp[3] = af;
288                 envp[4] = ad;
289                 put_net_conf(mdev->tconn);
290         }
291
292         /* The helper may take some time.
293          * write out any unsynced meta data changes now */
294         drbd_md_sync(mdev);
295
296         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
297         sib.sib_reason = SIB_HELPER_PRE;
298         sib.helper_name = cmd;
299         drbd_bcast_event(mdev, &sib);
300         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
301         if (ret)
302                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
303                                 usermode_helper, cmd, mb,
304                                 (ret >> 8) & 0xff, ret);
305         else
306                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
307                                 usermode_helper, cmd, mb,
308                                 (ret >> 8) & 0xff, ret);
309         sib.sib_reason = SIB_HELPER_POST;
310         sib.helper_exit_code = ret;
311         drbd_bcast_event(mdev, &sib);
312
313         if (ret < 0) /* Ignore any ERRNOs we got. */
314                 ret = 0;
315
316         return ret;
317 }
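
/* Note: call_usermodehelper() returns a wait()-style status, which is why the
 * logging above and drbd_try_outdate_peer() below extract the helper's exit
 * code as (ret >> 8) & 0xff. */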
318
319 enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
320 {
321         char *ex_to_string;
322         int r;
323         enum drbd_disk_state nps;
324         enum drbd_fencing_p fp;
325
326         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
327
328         if (get_ldev_if_state(mdev, D_CONSISTENT)) {
329                 fp = mdev->ldev->dc.fencing;
330                 put_ldev(mdev);
331         } else {
332                 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
333                 nps = mdev->state.pdsk;
334                 goto out;
335         }
336
337         r = drbd_khelper(mdev, "fence-peer");
338
339         switch ((r>>8) & 0xff) {
340         case 3: /* peer is inconsistent */
341                 ex_to_string = "peer is inconsistent or worse";
342                 nps = D_INCONSISTENT;
343                 break;
344         case 4: /* peer got outdated, or was already outdated */
345                 ex_to_string = "peer was fenced";
346                 nps = D_OUTDATED;
347                 break;
348         case 5: /* peer was down */
349                 if (mdev->state.disk == D_UP_TO_DATE) {
350                         /* we will(have) create(d) a new UUID anyways... */
351                         ex_to_string = "peer is unreachable, assumed to be dead";
352                         nps = D_OUTDATED;
353                 } else {
354                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
355                         nps = mdev->state.pdsk;
356                 }
357                 break;
358         case 6: /* Peer is primary, voluntarily outdate myself.
359                  * This is useful when an unconnected R_SECONDARY is asked to
360                  * become R_PRIMARY, but finds the other peer being active. */
361                 ex_to_string = "peer is active";
362                 dev_warn(DEV, "Peer is primary, outdating myself.\n");
363                 nps = D_UNKNOWN;
364                 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
365                 break;
366         case 7:
367                 if (fp != FP_STONITH)
368                         dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
369                 ex_to_string = "peer was stonithed";
370                 nps = D_OUTDATED;
371                 break;
372         default:
373                 /* The script is broken ... */
374                 nps = D_UNKNOWN;
375                 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
376                 return nps;
377         }
378
379         dev_info(DEV, "fence-peer helper returned %d (%s)\n",
380                         (r>>8) & 0xff, ex_to_string);
381
382 out:
383         if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
384                 /* The handler was not successful... unfreeze here, the
385                    state engine can not unfreeze... */
386                 _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
387         }
388
389         return nps;
390 }
391
392 static int _try_outdate_peer_async(void *data)
393 {
394         struct drbd_conf *mdev = (struct drbd_conf *)data;
395         enum drbd_disk_state nps;
396         union drbd_state ns;
397
398         nps = drbd_try_outdate_peer(mdev);
399
400         /* Not using
401            drbd_request_state(mdev, NS(pdsk, nps));
402            here, because we might have been able to re-establish the connection
403            in the meantime. This can only partially be solved by the state
404            engine's is_valid_state() and is_valid_state_transition()
405            functions.
406
407            nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
408            pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
409            therefore we need the pre state change check here.
410         */
411         spin_lock_irq(&mdev->tconn->req_lock);
412         ns = mdev->state;
413         if (ns.conn < C_WF_REPORT_PARAMS) {
414                 ns.pdsk = nps;
415                 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
416         }
417         spin_unlock_irq(&mdev->tconn->req_lock);
418
419         return 0;
420 }
421
422 void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
423 {
424         struct task_struct *opa;
425
426         opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
427         if (IS_ERR(opa))
428                 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
429 }
430
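/* Try to change the role of this device to new_role.  The state change is
 * retried up to max_tries times; along the way we may force the local disk to
 * be considered UpToDate (if "force" was given) and/or try to outdate the
 * peer via drbd_try_outdate_peer(), adjusting mask/val accordingly. */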
431 enum drbd_state_rv
432 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
433 {
434         const int max_tries = 4;
435         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
436         int try = 0;
437         int forced = 0;
438         union drbd_state mask, val;
439         enum drbd_disk_state nps;
440
441         if (new_role == R_PRIMARY)
442                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
443
444         mutex_lock(mdev->state_mutex);
445
446         mask.i = 0; mask.role = R_MASK;
447         val.i  = 0; val.role  = new_role;
448
449         while (try++ < max_tries) {
450                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
451
452                 /* in case we first succeeded in outdating the peer,
453                  * but now could suddenly establish a connection */
454                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
455                         val.pdsk = 0;
456                         mask.pdsk = 0;
457                         continue;
458                 }
459
460                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
461                     (mdev->state.disk < D_UP_TO_DATE &&
462                      mdev->state.disk >= D_INCONSISTENT)) {
463                         mask.disk = D_MASK;
464                         val.disk  = D_UP_TO_DATE;
465                         forced = 1;
466                         continue;
467                 }
468
469                 if (rv == SS_NO_UP_TO_DATE_DISK &&
470                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
471                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
472                         nps = drbd_try_outdate_peer(mdev);
473
474                         if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
475                                 val.disk = D_UP_TO_DATE;
476                                 mask.disk = D_MASK;
477                         }
478
479                         val.pdsk = nps;
480                         mask.pdsk = D_MASK;
481
482                         continue;
483                 }
484
485                 if (rv == SS_NOTHING_TO_DO)
486                         goto out;
487                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
488                         nps = drbd_try_outdate_peer(mdev);
489
490                         if (force && nps > D_OUTDATED) {
491                                 dev_warn(DEV, "Forced into split brain situation!\n");
492                                 nps = D_OUTDATED;
493                         }
494
495                         mask.pdsk = D_MASK;
496                         val.pdsk  = nps;
497
498                         continue;
499                 }
500                 if (rv == SS_TWO_PRIMARIES) {
501                         /* Maybe the peer is detected as dead very soon...
502                            retry at most once more in this case. */
503                         schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
504                         if (try < max_tries)
505                                 try = max_tries - 1;
506                         continue;
507                 }
508                 if (rv < SS_SUCCESS) {
509                         rv = _drbd_request_state(mdev, mask, val,
510                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
511                         if (rv < SS_SUCCESS)
512                                 goto out;
513                 }
514                 break;
515         }
516
517         if (rv < SS_SUCCESS)
518                 goto out;
519
520         if (forced)
521                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
522
523         /* Wait until nothing is on the fly :) */
524         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
525
526         if (new_role == R_SECONDARY) {
527                 set_disk_ro(mdev->vdisk, true);
528                 if (get_ldev(mdev)) {
529                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
530                         put_ldev(mdev);
531                 }
532         } else {
533                 if (get_net_conf(mdev->tconn)) {
534                         mdev->tconn->net_conf->want_lose = 0;
535                         put_net_conf(mdev->tconn);
536                 }
537                 set_disk_ro(mdev->vdisk, false);
538                 if (get_ldev(mdev)) {
539                         if (((mdev->state.conn < C_CONNECTED ||
540                                mdev->state.pdsk <= D_FAILED)
541                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
542                                 drbd_uuid_new_current(mdev);
543
544                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
545                         put_ldev(mdev);
546                 }
547         }
548
549         /* writeout of the bitmap areas covered by the activity log
550          * to stable storage was already done in the after-state-change work */
551
552         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
553                 /* if this was forced, we should consider sync */
554                 if (forced)
555                         drbd_send_uuids(mdev);
556                 drbd_send_state(mdev);
557         }
558
559         drbd_md_sync(mdev);
560
561         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
562 out:
563         mutex_unlock(mdev->state_mutex);
564         return rv;
565 }
566
567 static const char *from_attrs_err_to_txt(int err)
568 {
569         return  err == -ENOMSG ? "required attribute missing" :
570                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
571                 "invalid attribute value";
572 }
573
574 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
575 {
576         struct set_role_parms parms;
577         int err;
578         enum drbd_ret_code retcode;
579
580         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
581         if (!adm_ctx.reply_skb)
582                 return retcode;
583         if (retcode != NO_ERROR)
584                 goto out;
585
586         memset(&parms, 0, sizeof(parms));
587         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
588                 err = set_role_parms_from_attrs(&parms, info->attrs);
589                 if (err) {
590                         retcode = ERR_MANDATORY_TAG;
591                         drbd_msg_put_info(from_attrs_err_to_txt(err));
592                         goto out;
593                 }
594         }
595
596         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
597                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
598         else
599                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
600 out:
601         drbd_adm_finish(info, retcode);
602         return 0;
603 }
604
605 /* initializes the md.*_offset members, so we are able to find
606  * the on disk meta data */
607 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
608                                        struct drbd_backing_dev *bdev)
609 {
610         sector_t md_size_sect = 0;
611         switch (bdev->dc.meta_dev_idx) {
612         default:
613                 /* v07 style fixed size indexed meta data */
614                 bdev->md.md_size_sect = MD_RESERVED_SECT;
615                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
616                 bdev->md.al_offset = MD_AL_OFFSET;
617                 bdev->md.bm_offset = MD_BM_OFFSET;
618                 break;
619         case DRBD_MD_INDEX_FLEX_EXT:
620                 /* just occupy the full device; unit: sectors */
621                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
622                 bdev->md.md_offset = 0;
623                 bdev->md.al_offset = MD_AL_OFFSET;
624                 bdev->md.bm_offset = MD_BM_OFFSET;
625                 break;
626         case DRBD_MD_INDEX_INTERNAL:
627         case DRBD_MD_INDEX_FLEX_INT:
628                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
629                 /* al size is still fixed */
630                 bdev->md.al_offset = -MD_AL_SECTORS;
631                 /* we need (slightly less than) about this many bitmap sectors: */
632                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
633                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
634                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
635                 md_size_sect = ALIGN(md_size_sect, 8);
636
637                 /* plus the "drbd meta data super block",
638                  * and the activity log; */
639                 md_size_sect += MD_BM_OFFSET;
640
641                 bdev->md.md_size_sect = md_size_sect;
642                 /* bitmap offset is adjusted by 'super' block size */
643                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
644                 break;
645         }
646 }
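
/* Rough numbers for the internal/flexible-internal case (assuming the usual
 * 4 KiB of on-disk data per bitmap bit and 16 MiB per bitmap extent; see the
 * BM_* constants for the authoritative values): one bitmap sector is needed
 * per 16 MiB of backing device, i.e. roughly 32 KiB of bitmap per 1 GiB,
 * plus the activity log and the meta data "super block". */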
647
648 /* input size is expected to be in KB */
649 char *ppsize(char *buf, unsigned long long size)
650 {
651         /* Needs 9 bytes at max including trailing NUL:
652          * -1ULL ==> "16384 EB" */
653         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
654         int base = 0;
655         while (size >= 10000 && base < sizeof(units)-1) {
656                 /* shift + round */
657                 size = (size >> 10) + !!(size & (1<<9));
658                 base++;
659         }
660         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
661
662         return buf;
663 }
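
/* Example (for illustration only): ppsize(buf, 1048576) yields "1024 MB" --
 * 1048576 KB is >= 10000, so it is shifted down once (>>10, rounding on bit 9)
 * to 1024 with unit 'M', and the loop stops because 1024 < 10000. */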
664
665 /* there is still a theoretical deadlock when called from receiver
666  * on a D_INCONSISTENT R_PRIMARY:
667  *  remote READ does inc_ap_bio, receiver would need to receive answer
668  *  packet from remote to dec_ap_bio again.
669  *  receiver receive_sizes(), comes here,
670  *  waits for ap_bio_cnt == 0. -> deadlock.
671  * but this cannot happen, actually, because:
672  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
673  *  (not connected, or bad/no disk on peer):
674  *  see drbd_fail_request_early, ap_bio_cnt is zero.
675  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
676  *  peer may not initiate a resize.
677  */
678 /* Note these are not to be confused with
679  * drbd_adm_suspend_io/drbd_adm_resume_io,
680  * which are (sub) state changes triggered by admin (drbdsetup),
681  * and can be long lived.
682  * These change a bit in mdev->flags, are triggered by drbd internals,
683  * and should be short-lived. */
684 void drbd_suspend_io(struct drbd_conf *mdev)
685 {
686         set_bit(SUSPEND_IO, &mdev->flags);
687         if (is_susp(mdev->state))
688                 return;
689         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
690 }
691
692 void drbd_resume_io(struct drbd_conf *mdev)
693 {
694         clear_bit(SUSPEND_IO, &mdev->flags);
695         wake_up(&mdev->misc_wait);
696 }
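
/* Typical usage: bracket a critical section that must not race with
 * application IO, as drbd_determine_dev_size() and drbd_adm_attach() below do:
 *
 *      drbd_suspend_io(mdev);
 *      ... resize / reconfigure ...
 *      drbd_resume_io(mdev);
 */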
697
698 /**
699  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
700  * @mdev:       DRBD device.
701  *
702  * Returns 0 on success, negative return values indicate errors.
703  * You should call drbd_md_sync() after calling this function.
704  */
705 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
706 {
707         sector_t prev_first_sect, prev_size; /* previous meta location */
708         sector_t la_size;
709         sector_t size;
710         char ppb[10];
711
712         int md_moved, la_size_changed;
713         enum determine_dev_size rv = unchanged;
714
715         /* race:
716          * application request passes inc_ap_bio,
717          * but then cannot get an AL-reference.
718          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
719          *
720          * to avoid that:
721          * Suspend IO right here.
722          * still lock the act_log to not trigger ASSERTs there.
723          */
724         drbd_suspend_io(mdev);
725
726         /* no wait necessary anymore, actually we could assert that */
727         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
728
729         prev_first_sect = drbd_md_first_sector(mdev->ldev);
730         prev_size = mdev->ldev->md.md_size_sect;
731         la_size = mdev->ldev->md.la_size_sect;
732
733         /* TODO: should only be some assert here, not (re)init... */
734         drbd_md_set_sector_offsets(mdev, mdev->ldev);
735
736         size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
737
738         if (drbd_get_capacity(mdev->this_bdev) != size ||
739             drbd_bm_capacity(mdev) != size) {
740                 int err;
741                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
742                 if (unlikely(err)) {
743                         /* currently there is only one error: ENOMEM! */
744                         size = drbd_bm_capacity(mdev)>>1;
745                         if (size == 0) {
746                                 dev_err(DEV, "OUT OF MEMORY! "
747                                     "Could not allocate bitmap!\n");
748                         } else {
749                                 dev_err(DEV, "BM resizing failed. "
750                                     "Leaving size unchanged at size = %lu KB\n",
751                                     (unsigned long)size);
752                         }
753                         rv = dev_size_error;
754                 }
755                 /* racy, see comments above. */
756                 drbd_set_my_capacity(mdev, size);
757                 mdev->ldev->md.la_size_sect = size;
758                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
759                      (unsigned long long)size>>1);
760         }
761         if (rv == dev_size_error)
762                 goto out;
763
764         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
765
766         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
767                 || prev_size       != mdev->ldev->md.md_size_sect;
768
769         if (la_size_changed || md_moved) {
770                 int err;
771
772                 drbd_al_shrink(mdev); /* All extents inactive. */
773                 dev_info(DEV, "Writing the whole bitmap, %s\n",
774                          la_size_changed && md_moved ? "size changed and md moved" :
775                          la_size_changed ? "size changed" : "md moved");
776                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
777                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
778                                 "size changed", BM_LOCKED_MASK);
779                 if (err) {
780                         rv = dev_size_error;
781                         goto out;
782                 }
783                 drbd_md_mark_dirty(mdev);
784         }
785
786         if (size > la_size)
787                 rv = grew;
788         if (size < la_size)
789                 rv = shrunk;
790 out:
791         lc_unlock(mdev->act_log);
792         wake_up(&mdev->al_wait);
793         drbd_resume_io(mdev);
794
795         return rv;
796 }
797
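/* In short: if both the peer's and the local size are known, take the minimum;
 * otherwise fall back to the last agreed size (capped by whichever single size
 * is known), or to whatever size is available at all.  A user requested
 * disk_size may only shrink the result, never grow it beyond that. */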
798 sector_t
799 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
800 {
801         sector_t p_size = mdev->p_size;   /* partner's disk size. */
802         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
803         sector_t m_size; /* my size */
804         sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
805         sector_t size = 0;
806
807         m_size = drbd_get_max_capacity(bdev);
808
809         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
810                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
811                 p_size = m_size;
812         }
813
814         if (p_size && m_size) {
815                 size = min_t(sector_t, p_size, m_size);
816         } else {
817                 if (la_size) {
818                         size = la_size;
819                         if (m_size && m_size < size)
820                                 size = m_size;
821                         if (p_size && p_size < size)
822                                 size = p_size;
823                 } else {
824                         if (m_size)
825                                 size = m_size;
826                         if (p_size)
827                                 size = p_size;
828                 }
829         }
830
831         if (size == 0)
832                 dev_err(DEV, "Both nodes diskless!\n");
833
834         if (u_size) {
835                 if (u_size > size)
836                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
837                             (unsigned long)u_size>>1, (unsigned long)size>>1);
838                 else
839                         size = u_size;
840         }
841
842         return size;
843 }
844
845 /**
846  * drbd_check_al_size() - Ensures that the AL is of the right size
847  * @mdev:       DRBD device.
848  *
849  * Returns -EBUSY if the current AL LRU is still in use, -ENOMEM when allocation
850  * failed, and 0 on success. You should call drbd_md_sync() after calling
851  * this function.
852  */
853 static int drbd_check_al_size(struct drbd_conf *mdev)
854 {
855         struct lru_cache *n, *t;
856         struct lc_element *e;
857         unsigned int in_use;
858         int i;
859
860         if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
861                 mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;
862
863         if (mdev->act_log &&
864             mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
865                 return 0;
866
867         in_use = 0;
868         t = mdev->act_log;
869         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
870                 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
871
872         if (n == NULL) {
873                 dev_err(DEV, "Cannot allocate act_log lru!\n");
874                 return -ENOMEM;
875         }
876         spin_lock_irq(&mdev->al_lock);
877         if (t) {
878                 for (i = 0; i < t->nr_elements; i++) {
879                         e = lc_element_by_index(t, i);
880                         if (e->refcnt)
881                                 dev_err(DEV, "refcnt(%d)==%d\n",
882                                     e->lc_number, e->refcnt);
883                         in_use += e->refcnt;
884                 }
885         }
886         if (!in_use)
887                 mdev->act_log = n;
888         spin_unlock_irq(&mdev->al_lock);
889         if (in_use) {
890                 dev_err(DEV, "Activity log still in use!\n");
891                 lc_destroy(n);
892                 return -EBUSY;
893         } else {
894                 if (t)
895                         lc_destroy(t);
896         }
897         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
898         return 0;
899 }
900
901 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
902 {
903         struct request_queue * const q = mdev->rq_queue;
904         int max_hw_sectors = max_bio_size >> 9;
905         int max_segments = 0;
906
907         if (get_ldev_if_state(mdev, D_ATTACHING)) {
908                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
909
910                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
911                 max_segments = mdev->ldev->dc.max_bio_bvecs;
912                 put_ldev(mdev);
913         }
914
915         blk_queue_logical_block_size(q, 512);
916         blk_queue_max_hw_sectors(q, max_hw_sectors);
917         /* This is the workaround for "bio would need to, but cannot, be split" */
918         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
919         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
920
921         if (get_ldev_if_state(mdev, D_ATTACHING)) {
922                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
923
924                 blk_queue_stack_limits(q, b);
925
926                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
927                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
928                                  q->backing_dev_info.ra_pages,
929                                  b->backing_dev_info.ra_pages);
930                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
931                 }
932                 put_ldev(mdev);
933         }
934 }
935
936 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
937 {
938         int now, new, local, peer;
939
940         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
941         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
942         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
943
944         if (get_ldev_if_state(mdev, D_ATTACHING)) {
945                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
946                 mdev->local_max_bio_size = local;
947                 put_ldev(mdev);
948         }
949
950         /* We may ignore peer limits if the peer is modern enough:
951            from 8.3.8 onwards the peer can use multiple
952            BIOs for a single peer_request */
953         if (mdev->state.conn >= C_CONNECTED) {
954                 if (mdev->tconn->agreed_pro_version < 94)
955                         peer = mdev->peer_max_bio_size;
956                 else if (mdev->tconn->agreed_pro_version == 94)
957                         peer = DRBD_MAX_SIZE_H80_PACKET;
958                 else /* drbd 8.3.8 onwards */
959                         peer = DRBD_MAX_BIO_SIZE;
960         }
961
962         new = min_t(int, local, peer);
963
964         if (mdev->state.role == R_PRIMARY && new < now)
965                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
966
967         if (new != now)
968                 dev_info(DEV, "max BIO size = %u\n", new);
969
970         drbd_setup_queue_param(mdev, new);
971 }
972
973 /* serialize deconfig (worker exiting, doing cleanup)
974  * and reconfig (drbdsetup disk, drbdsetup net)
975  *
976  * Wait for a potentially exiting worker, then restart it,
977  * or start a new one.  Flush any pending work; there may still be an
978  * after_state_change work item queued.
979  */
980 static void conn_reconfig_start(struct drbd_tconn *tconn)
981 {
982         wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
983         wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
984         drbd_thread_start(&tconn->worker);
985         conn_flush_workqueue(tconn);
986 }
987
988 /* if still unconfigured, stops worker again.
989  * if configured now, clears CONFIG_PENDING.
990  * wakes potential waiters */
991 static void conn_reconfig_done(struct drbd_tconn *tconn)
992 {
993         spin_lock_irq(&tconn->req_lock);
994         if (conn_all_vols_unconf(tconn)) {
995                 set_bit(OBJECT_DYING, &tconn->flags);
996                 drbd_thread_stop_nowait(&tconn->worker);
997         } else
998                 clear_bit(CONFIG_PENDING, &tconn->flags);
999         spin_unlock_irq(&tconn->req_lock);
1000         wake_up(&tconn->ping_wait);
1001 }
1002
1003 /* Make sure IO is suspended before calling this function. */
1004 static void drbd_suspend_al(struct drbd_conf *mdev)
1005 {
1006         int s = 0;
1007
1008         if (!lc_try_lock(mdev->act_log)) {
1009                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1010                 return;
1011         }
1012
1013         drbd_al_shrink(mdev);
1014         spin_lock_irq(&mdev->tconn->req_lock);
1015         if (mdev->state.conn < C_CONNECTED)
1016                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1017         spin_unlock_irq(&mdev->tconn->req_lock);
1018         lc_unlock(mdev->act_log);
1019
1020         if (s)
1021                 dev_info(DEV, "Suspended AL updates\n");
1022 }
1023
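/* Attach a local backing device.  In rough order: validate the disk_conf
 * attributes, open the backing and meta data devices, sanity check the
 * resulting sizes, move to D_ATTACHING, read the meta data, set up bitmap and
 * activity log, and finally compute and commit the new disk state. */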
1024 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1025 {
1026         struct drbd_conf *mdev;
1027         int err;
1028         enum drbd_ret_code retcode;
1029         enum determine_dev_size dd;
1030         sector_t max_possible_sectors;
1031         sector_t min_md_device_sectors;
1032         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1033         struct block_device *bdev;
1034         struct lru_cache *resync_lru = NULL;
1035         union drbd_state ns, os;
1036         enum drbd_state_rv rv;
1037         int cp_discovered = 0;
1038
1039         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1040         if (!adm_ctx.reply_skb)
1041                 return retcode;
1042         if (retcode != NO_ERROR)
1043                 goto fail;
1044
1045         mdev = adm_ctx.mdev;
1046         conn_reconfig_start(mdev->tconn);
1047
1048         /* if you want to reconfigure, please tear down first */
1049         if (mdev->state.disk > D_DISKLESS) {
1050                 retcode = ERR_DISK_CONFIGURED;
1051                 goto fail;
1052         }
1053         /* It may just now have detached because of IO error.  Make sure
1054          * drbd_ldev_destroy is done already, we may end up here very fast,
1055          * e.g. if someone calls attach from the on-io-error handler,
1056          * to realize a "hot spare" feature (not that I'd recommend that) */
1057         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1058
1059         /* allocation not in the IO path, drbdsetup context */
1060         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1061         if (!nbc) {
1062                 retcode = ERR_NOMEM;
1063                 goto fail;
1064         }
1065
1066         nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
1067         nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
1068         nbc->dc.fencing       = DRBD_FENCING_DEF;
1069         nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
1070
1071         err = disk_conf_from_attrs(&nbc->dc, info->attrs);
1072         if (err) {
1073                 retcode = ERR_MANDATORY_TAG;
1074                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1075                 goto fail;
1076         }
1077
1078         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1079                 retcode = ERR_MD_IDX_INVALID;
1080                 goto fail;
1081         }
1082
1083         if (get_net_conf(mdev->tconn)) {
1084                 int prot = mdev->tconn->net_conf->wire_protocol;
1085                 put_net_conf(mdev->tconn);
1086                 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
1087                         retcode = ERR_STONITH_AND_PROT_A;
1088                         goto fail;
1089                 }
1090         }
1091
1092         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1093                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1094         if (IS_ERR(bdev)) {
1095                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1096                         PTR_ERR(bdev));
1097                 retcode = ERR_OPEN_DISK;
1098                 goto fail;
1099         }
1100         nbc->backing_bdev = bdev;
1101
1102         /*
1103          * meta_dev_idx >= 0: external fixed size, possibly multiple
1104          * drbd sharing one meta device.  TODO in that case, paranoia
1105          * check that [md_bdev, meta_dev_idx] is not yet used by some
1106          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1107          * should check it for you already; but if you don't, or
1108          * someone fooled it, we need to double check here)
1109          */
1110         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1111                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1112                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1113                                   (void *)mdev : (void *)drbd_m_holder);
1114         if (IS_ERR(bdev)) {
1115                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1116                         PTR_ERR(bdev));
1117                 retcode = ERR_OPEN_MD_DISK;
1118                 goto fail;
1119         }
1120         nbc->md_bdev = bdev;
1121
1122         if ((nbc->backing_bdev == nbc->md_bdev) !=
1123             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1124              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1125                 retcode = ERR_MD_IDX_INVALID;
1126                 goto fail;
1127         }
1128
1129         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1130                         1, 61, sizeof(struct bm_extent),
1131                         offsetof(struct bm_extent, lce));
1132         if (!resync_lru) {
1133                 retcode = ERR_NOMEM;
1134                 goto fail;
1135         }
1136
1137         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1138         drbd_md_set_sector_offsets(mdev, nbc);
1139
1140         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1141                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1142                         (unsigned long long) drbd_get_max_capacity(nbc),
1143                         (unsigned long long) nbc->dc.disk_size);
1144                 retcode = ERR_DISK_TO_SMALL;
1145                 goto fail;
1146         }
1147
1148         if ((int)nbc->dc.meta_dev_idx < 0) {
1149                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1150                 /* at least one MB, otherwise it does not make sense */
1151                 min_md_device_sectors = (2<<10);
1152         } else {
1153                 max_possible_sectors = DRBD_MAX_SECTORS;
1154                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1155         }
1156
1157         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1158                 retcode = ERR_MD_DISK_TO_SMALL;
1159                 dev_warn(DEV, "refusing attach: md-device too small, "
1160                      "at least %llu sectors needed for this meta-disk type\n",
1161                      (unsigned long long) min_md_device_sectors);
1162                 goto fail;
1163         }
1164
1165         /* Make sure the new disk is big enough
1166          * (we may currently be R_PRIMARY with no local disk...) */
1167         if (drbd_get_max_capacity(nbc) <
1168             drbd_get_capacity(mdev->this_bdev)) {
1169                 retcode = ERR_DISK_TO_SMALL;
1170                 goto fail;
1171         }
1172
1173         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1174
1175         if (nbc->known_size > max_possible_sectors) {
1176                 dev_warn(DEV, "==> truncating very big lower level device "
1177                         "to currently maximum possible %llu sectors <==\n",
1178                         (unsigned long long) max_possible_sectors);
1179                 if ((int)nbc->dc.meta_dev_idx >= 0)
1180                         dev_warn(DEV, "==>> using internal or flexible "
1181                                       "meta data may help <<==\n");
1182         }
1183
1184         drbd_suspend_io(mdev);
1185         /* also wait for the last barrier ack. */
1186         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
1187         /* and for any other previously queued work */
1188         drbd_flush_workqueue(mdev);
1189
1190         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1191         retcode = rv;  /* FIXME: Type mismatch. */
1192         drbd_resume_io(mdev);
1193         if (rv < SS_SUCCESS)
1194                 goto fail;
1195
1196         if (!get_ldev_if_state(mdev, D_ATTACHING))
1197                 goto force_diskless;
1198
1199         drbd_md_set_sector_offsets(mdev, nbc);
1200
1201         if (!mdev->bitmap) {
1202                 if (drbd_bm_init(mdev)) {
1203                         retcode = ERR_NOMEM;
1204                         goto force_diskless_dec;
1205                 }
1206         }
1207
1208         retcode = drbd_md_read(mdev, nbc);
1209         if (retcode != NO_ERROR)
1210                 goto force_diskless_dec;
1211
1212         if (mdev->state.conn < C_CONNECTED &&
1213             mdev->state.role == R_PRIMARY &&
1214             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1215                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1216                     (unsigned long long)mdev->ed_uuid);
1217                 retcode = ERR_DATA_NOT_CURRENT;
1218                 goto force_diskless_dec;
1219         }
1220
1221         /* Since we are diskless, fix the activity log first... */
1222         if (drbd_check_al_size(mdev)) {
1223                 retcode = ERR_NOMEM;
1224                 goto force_diskless_dec;
1225         }
1226
1227         /* Prevent shrinking of consistent devices ! */
1228         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1229             drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
1230                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1231                 retcode = ERR_DISK_TO_SMALL;
1232                 goto force_diskless_dec;
1233         }
1234
1235         if (!drbd_al_read_log(mdev, nbc)) {
1236                 retcode = ERR_IO_MD_DISK;
1237                 goto force_diskless_dec;
1238         }
1239
1240         /* Reset the "barriers don't work" bits here, then force meta data to
1241          * be written, to ensure we determine if barriers are supported. */
1242         if (nbc->dc.no_md_flush)
1243                 set_bit(MD_NO_FUA, &mdev->flags);
1244         else
1245                 clear_bit(MD_NO_FUA, &mdev->flags);
1246
1247         /* Point of no return reached.
1248          * Devices and memory are no longer released by error cleanup below.
1249          * now mdev takes over responsibility, and the state engine should
1250          * clean it up somewhere.  */
1251         D_ASSERT(mdev->ldev == NULL);
1252         mdev->ldev = nbc;
1253         mdev->resync = resync_lru;
1254         nbc = NULL;
1255         resync_lru = NULL;
1256
1257         mdev->write_ordering = WO_bdev_flush;
1258         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1259
1260         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1261                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1262         else
1263                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1264
1265         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1266             !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
1267                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1268                 cp_discovered = 1;
1269         }
1270
1271         mdev->send_cnt = 0;
1272         mdev->recv_cnt = 0;
1273         mdev->read_cnt = 0;
1274         mdev->writ_cnt = 0;
1275
1276         drbd_reconsider_max_bio_size(mdev);
1277
1278         /* If I am currently not R_PRIMARY,
1279          * but meta data primary indicator is set,
1280          * I just now recover from a hard crash,
1281          * and have been R_PRIMARY before that crash.
1282          *
1283          * Now, if I had no connection before that crash
1284          * (have been degraded R_PRIMARY), chances are that
1285          * I won't find my peer now either.
1286          *
1287          * In that case, and _only_ in that case,
1288          * we use the degr-wfc-timeout instead of the default,
1289          * so we can automatically recover from a crash of a
1290          * degraded but active "cluster" after a certain timeout.
1291          */
1292         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1293         if (mdev->state.role != R_PRIMARY &&
1294              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1295             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1296                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1297
1298         dd = drbd_determine_dev_size(mdev, 0);
1299         if (dd == dev_size_error) {
1300                 retcode = ERR_NOMEM_BITMAP;
1301                 goto force_diskless_dec;
1302         } else if (dd == grew)
1303                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1304
1305         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1306                 dev_info(DEV, "Assuming that all blocks are out of sync "
1307                      "(aka FullSync)\n");
1308                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1309                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1310                         retcode = ERR_IO_MD_DISK;
1311                         goto force_diskless_dec;
1312                 }
1313         } else {
1314                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1315                         "read from attaching", BM_LOCKED_MASK) < 0) {
1316                         retcode = ERR_IO_MD_DISK;
1317                         goto force_diskless_dec;
1318                 }
1319         }
1320
1321         if (cp_discovered) {
1322                 drbd_al_apply_to_bm(mdev);
1323                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1324                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1325                         retcode = ERR_IO_MD_DISK;
1326                         goto force_diskless_dec;
1327                 }
1328         }
1329
1330         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1331                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1332
1333         spin_lock_irq(&mdev->tconn->req_lock);
1334         os = mdev->state;
1335         ns.i = os.i;
1336         /* If MDF_CONSISTENT is not set go into D_INCONSISTENT disk state,
1337            otherwise investigate MDF_WAS_UP_TO_DATE...
1338            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1339            otherwise into D_CONSISTENT state.
1340         */
1341         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1342                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1343                         ns.disk = D_CONSISTENT;
1344                 else
1345                         ns.disk = D_OUTDATED;
1346         } else {
1347                 ns.disk = D_INCONSISTENT;
1348         }
1349
1350         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1351                 ns.pdsk = D_OUTDATED;
1352
1353         if (ns.disk == D_CONSISTENT &&
1354             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1355                 ns.disk = D_UP_TO_DATE;
1356
1357         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1358            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1359            this point, because drbd_request_state() modifies these
1360            flags. */
1361
1362         /* In case we are C_CONNECTED, postpone any decision on the new disk
1363            state until after the negotiation phase. */
1364         if (mdev->state.conn == C_CONNECTED) {
1365                 mdev->new_state_tmp.i = ns.i;
1366                 ns.i = os.i;
1367                 ns.disk = D_NEGOTIATING;
1368
1369                 /* We expect to receive up-to-date UUIDs soon.
1370                    To avoid a race in receive_state, free p_uuid while
1371                    holding req_lock. I.e. atomic with the state change */
1372                 kfree(mdev->p_uuid);
1373                 mdev->p_uuid = NULL;
1374         }
1375
1376         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1377         ns = mdev->state;
1378         spin_unlock_irq(&mdev->tconn->req_lock);
1379
1380         if (rv < SS_SUCCESS)
1381                 goto force_diskless_dec;
1382
1383         if (mdev->state.role == R_PRIMARY)
1384                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1385         else
1386                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1387
1388         drbd_md_mark_dirty(mdev);
1389         drbd_md_sync(mdev);
1390
1391         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1392         put_ldev(mdev);
1393         conn_reconfig_done(mdev->tconn);
1394         drbd_adm_finish(info, retcode);
1395         return 0;
1396
1397  force_diskless_dec:
1398         put_ldev(mdev);
1399  force_diskless:
1400         drbd_force_state(mdev, NS(disk, D_FAILED));
1401         drbd_md_sync(mdev);
1402         conn_reconfig_done(mdev->tconn);
1403  fail:
1404         if (nbc) {
1405                 if (nbc->backing_bdev)
1406                         blkdev_put(nbc->backing_bdev,
1407                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1408                 if (nbc->md_bdev)
1409                         blkdev_put(nbc->md_bdev,
1410                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1411                 kfree(nbc);
1412         }
1413         lc_destroy(resync_lru);
1414
1415         drbd_adm_finish(info, retcode);
1416         return 0;
1417 }
1418
1419 /* Detaching the disk is a process in multiple stages.  First we need to lock
1420  * out application IO, in-flight IO, and IO stuck in drbd_al_begin_io.
1421  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1422  * internal references as well.
1423  * Only then have we finally detached. */
1424 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1425 {
1426         struct drbd_conf *mdev;
1427         enum drbd_ret_code retcode;
1428
1429         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1430         if (!adm_ctx.reply_skb)
1431                 return retcode;
1432         if (retcode != NO_ERROR)
1433                 goto out;
1434
1435         mdev = adm_ctx.mdev;
1436         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1437         retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1438         wait_event(mdev->misc_wait,
1439                         mdev->state.disk != D_DISKLESS ||
1440                         !atomic_read(&mdev->local_cnt));
1441         drbd_resume_io(mdev);
1442 out:
1443         drbd_adm_finish(info, retcode);
1444         return 0;
1445 }
1446
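/* Configure the network for this connection: validate the requested net_conf
 * (protocol constraints, addresses not already in use by another connection),
 * set up the CRAM-HMAC and data integrity hash transforms, install everything
 * under req_lock and request the C_UNCONNECTED state. */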
1447 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1448 {
1449         char hmac_name[CRYPTO_MAX_ALG_NAME];
1450         struct drbd_conf *mdev;
1451         struct net_conf *new_conf = NULL;
1452         struct crypto_hash *tfm = NULL;
1453         struct crypto_hash *integrity_w_tfm = NULL;
1454         struct crypto_hash *integrity_r_tfm = NULL;
1455         void *int_dig_out = NULL;
1456         void *int_dig_in = NULL;
1457         void *int_dig_vv = NULL;
1458         struct drbd_tconn *oconn;
1459         struct drbd_tconn *tconn;
1460         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1461         enum drbd_ret_code retcode;
1462         int i;
1463         int err;
1464
1465         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1466         if (!adm_ctx.reply_skb)
1467                 return retcode;
1468         if (retcode != NO_ERROR)
1469                 goto out;
1470
1471         tconn = adm_ctx.tconn;
1472         conn_reconfig_start(tconn);
1473
1474         if (tconn->cstate > C_STANDALONE) {
1475                 retcode = ERR_NET_CONFIGURED;
1476                 goto fail;
1477         }
1478
1479         /* allocation not in the IO path, cqueue thread context */
1480         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1481         if (!new_conf) {
1482                 retcode = ERR_NOMEM;
1483                 goto fail;
1484         }
1485
1486         new_conf->timeout          = DRBD_TIMEOUT_DEF;
1487         new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
1488         new_conf->ping_int         = DRBD_PING_INT_DEF;
1489         new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
1490         new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
1491         new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
1492         new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
1493         new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
1494         new_conf->ko_count         = DRBD_KO_COUNT_DEF;
1495         new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
1496         new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
1497         new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
1498         new_conf->want_lose        = 0;
1499         new_conf->two_primaries    = 0;
1500         new_conf->wire_protocol    = DRBD_PROT_C;
1501         new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
1502         new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
1503         new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
1504         new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
1505
1506         err = net_conf_from_attrs(new_conf, info->attrs);
1507         if (err) {
1508                 retcode = ERR_MANDATORY_TAG;
1509                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1510                 goto fail;
1511         }
1512
1513         if (new_conf->two_primaries
1514             && (new_conf->wire_protocol != DRBD_PROT_C)) {
1515                 retcode = ERR_NOT_PROTO_C;
1516                 goto fail;
1517         }
1518
1519         idr_for_each_entry(&tconn->volumes, mdev, i) {
1520                 if (get_ldev(mdev)) {
1521                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1522                         put_ldev(mdev);
1523                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1524                                 retcode = ERR_STONITH_AND_PROT_A;
1525                                 goto fail;
1526                         }
1527                 }
1528                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1529                         retcode = ERR_DISCARD;
1530                         goto fail;
1531                 }
1532                 if (!mdev->bitmap) {
1533                         if (drbd_bm_init(mdev)) {
1534                                 retcode = ERR_NOMEM;
1535                                 goto fail;
1536                         }
1537                 }
1538         }
1539
1540         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
1541                 retcode = ERR_CONG_NOT_PROTO_A;
1542                 goto fail;
1543         }
1544
1545         retcode = NO_ERROR;
1546
1547         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1548         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1549
1550         /* No need to take drbd_cfg_mutex here.  All reconfiguration is
1551          * strictly serialized on genl_lock(). We are protected against
1552          * concurrent reconfiguration/addition/deletion */
1553         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1554                 if (oconn == tconn)
1555                         continue;
1556                 if (get_net_conf(oconn)) {
1557                         taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
1558                         if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
1559                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1560                                 retcode = ERR_LOCAL_ADDR;
1561
1562                         taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
1563                         if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
1564                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1565                                 retcode = ERR_PEER_ADDR;
1566
1567                         put_net_conf(oconn);
1568                         if (retcode != NO_ERROR)
1569                                 goto fail;
1570                 }
1571         }
1572
1573         if (new_conf->cram_hmac_alg[0] != 0) {
1574                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1575                         new_conf->cram_hmac_alg);
1576                 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1577                 if (IS_ERR(tfm)) {
1578                         tfm = NULL;
1579                         retcode = ERR_AUTH_ALG;
1580                         goto fail;
1581                 }
1582
1583                 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1584                         retcode = ERR_AUTH_ALG_ND;
1585                         goto fail;
1586                 }
1587         }
1588
1589         if (new_conf->integrity_alg[0]) {
1590                 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1591                 if (IS_ERR(integrity_w_tfm)) {
1592                         integrity_w_tfm = NULL;
1593                         retcode = ERR_INTEGRITY_ALG;
1594                         goto fail;
1595                 }
1596
1597                 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1598                         retcode = ERR_INTEGRITY_ALG_ND;
1599                         goto fail;
1600                 }
1601
1602                 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1603                 if (IS_ERR(integrity_r_tfm)) {
1604                         integrity_r_tfm = NULL;
1605                         retcode = ERR_INTEGRITY_ALG;
1606                         goto fail;
1607                 }
1608         }
1609
1610         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; /* ensure NUL termination */
1611
1612         /* allocation not in the IO path, cqueue thread context */
1613         if (integrity_w_tfm) {
1614                 i = crypto_hash_digestsize(integrity_w_tfm);
1615                 int_dig_out = kmalloc(i, GFP_KERNEL);
1616                 if (!int_dig_out) {
1617                         retcode = ERR_NOMEM;
1618                         goto fail;
1619                 }
1620                 int_dig_in = kmalloc(i, GFP_KERNEL);
1621                 if (!int_dig_in) {
1622                         retcode = ERR_NOMEM;
1623                         goto fail;
1624                 }
1625                 int_dig_vv = kmalloc(i, GFP_KERNEL);
1626                 if (!int_dig_vv) {
1627                         retcode = ERR_NOMEM;
1628                         goto fail;
1629                 }
1630         }
1631
1632         conn_flush_workqueue(tconn);
1633         spin_lock_irq(&tconn->req_lock);
1634         if (tconn->net_conf != NULL) {
1635                 retcode = ERR_NET_CONFIGURED;
1636                 spin_unlock_irq(&tconn->req_lock);
1637                 goto fail;
1638         }
1639         tconn->net_conf = new_conf;
1640
1641         crypto_free_hash(tconn->cram_hmac_tfm);
1642         tconn->cram_hmac_tfm = tfm;
1643
1644         crypto_free_hash(tconn->integrity_w_tfm);
1645         tconn->integrity_w_tfm = integrity_w_tfm;
1646
1647         crypto_free_hash(tconn->integrity_r_tfm);
1648         tconn->integrity_r_tfm = integrity_r_tfm;
1649
1650         kfree(tconn->int_dig_out);
1651         kfree(tconn->int_dig_in);
1652         kfree(tconn->int_dig_vv);
1653         tconn->int_dig_out = int_dig_out;
1654         tconn->int_dig_in = int_dig_in;
1655         tconn->int_dig_vv = int_dig_vv;
1656         retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
1657         spin_unlock_irq(&tconn->req_lock);
1658
1659         idr_for_each_entry(&tconn->volumes, mdev, i) {
1660                 mdev->send_cnt = 0;
1661                 mdev->recv_cnt = 0;
1662                 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1663         }
1664         conn_reconfig_done(tconn);
1665         drbd_adm_finish(info, retcode);
1666         return 0;
1667
1668 fail:
1669         kfree(int_dig_out);
1670         kfree(int_dig_in);
1671         kfree(int_dig_vv);
1672         crypto_free_hash(tfm);
1673         crypto_free_hash(integrity_w_tfm);
1674         crypto_free_hash(integrity_r_tfm);
1675         kfree(new_conf);
1676
1677         conn_reconfig_done(tconn);
1678 out:
1679         drbd_adm_finish(info, retcode);
1680         return 0;
1681 }
1682
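/* Tear down the connection.  A forced disconnect goes straight to
 * C_DISCONNECTING with CS_HARD; otherwise retry the state change with the
 * peer or the local disk marked D_OUTDATED where the state engine requires
 * it, then wait until we have left C_DISCONNECTING. */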
1683 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
1684 {
1685         struct disconnect_parms parms;
1686         struct drbd_tconn *tconn;
1687         enum drbd_ret_code retcode;
1688         int err;
1689
1690         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1691         if (!adm_ctx.reply_skb)
1692                 return retcode;
1693         if (retcode != NO_ERROR)
1694                 goto fail;
1695
1696         tconn = adm_ctx.tconn;
1697         memset(&parms, 0, sizeof(parms));
1698         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
1699                 err = disconnect_parms_from_attrs(&parms, info->attrs);
1700                 if (err) {
1701                         retcode = ERR_MANDATORY_TAG;
1702                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1703                         goto fail;
1704                 }
1705         }
1706
1707         if (parms.force_disconnect) {
1708                 spin_lock_irq(&tconn->req_lock);
1709                 if (tconn->cstate >= C_WF_CONNECTION)
1710                         _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1711                 spin_unlock_irq(&tconn->req_lock);
1712                 goto done;
1713         }
1714
1715         retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
1716
1717         if (retcode == SS_NOTHING_TO_DO)
1718                 goto done;
1719         else if (retcode == SS_ALREADY_STANDALONE)
1720                 goto done;
1721         else if (retcode == SS_PRIMARY_NOP) {
1722                 /* Our state checking code wants to see the peer outdated. */
1723                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1724                                                         pdsk, D_OUTDATED), CS_VERBOSE);
1725         } else if (retcode == SS_CW_FAILED_BY_PEER) {
1726                 /* The peer probably wants to see us outdated. */
1727                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1728                                                         disk, D_OUTDATED), 0);
1729                 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
1730                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1731                         retcode = SS_SUCCESS;
1732                 }
1733         }
1734
1735         if (retcode < SS_SUCCESS)
1736                 goto fail;
1737
1738         if (wait_event_interruptible(tconn->ping_wait,
1739                                      tconn->cstate != C_DISCONNECTING)) {
1740                 /* Do not test for mdev->state.conn == C_STANDALONE, since
1741                    someone else might connect us in the meantime! */
1742                 retcode = ERR_INTR;
1743                 goto fail;
1744         }
1745
1746  done:
1747         retcode = NO_ERROR;
1748  fail:
1749         drbd_adm_finish(info, retcode);
1750         return 0;
1751 }
1752
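/* After an online grow, decide whether this node acts as sync source or
 * sync target for the resync of the newly added storage. */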
1753 void resync_after_online_grow(struct drbd_conf *mdev)
1754 {
1755         int iass; /* I am sync source */
1756
1757         dev_info(DEV, "Resync of new storage after online grow\n");
1758         if (mdev->state.role != mdev->state.peer)
1759                 iass = (mdev->state.role == R_PRIMARY);
1760         else
1761                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
1762
1763         if (iass)
1764                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1765         else
1766                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
1767 }
1768
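/* Resize the device: update the requested disk_size, re-determine the
 * device size and, while connected, announce the new UUIDs and sizes to
 * the peer.  Rejected while a resync is running or without a primary. */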
1769 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
1770 {
1771         struct resize_parms rs;
1772         struct drbd_conf *mdev;
1773         enum drbd_ret_code retcode;
1774         enum determine_dev_size dd;
1775         enum dds_flags ddsf;
1776         int err;
1777
1778         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1779         if (!adm_ctx.reply_skb)
1780                 return retcode;
1781         if (retcode != NO_ERROR)
1782                 goto fail;
1783
1784         memset(&rs, 0, sizeof(struct resize_parms));
1785         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
1786                 err = resize_parms_from_attrs(&rs, info->attrs);
1787                 if (err) {
1788                         retcode = ERR_MANDATORY_TAG;
1789                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1790                         goto fail;
1791                 }
1792         }
1793
1794         mdev = adm_ctx.mdev;
1795         if (mdev->state.conn > C_CONNECTED) {
1796                 retcode = ERR_RESIZE_RESYNC;
1797                 goto fail;
1798         }
1799
1800         if (mdev->state.role == R_SECONDARY &&
1801             mdev->state.peer == R_SECONDARY) {
1802                 retcode = ERR_NO_PRIMARY;
1803                 goto fail;
1804         }
1805
1806         if (!get_ldev(mdev)) {
1807                 retcode = ERR_NO_DISK;
1808                 goto fail;
1809         }
1810
1811         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
1812                 retcode = ERR_NEED_APV_93;
1813                 goto fail;
1814         }
1815
1816         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
1817                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
1818
1819         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1820         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1821         dd = drbd_determine_dev_size(mdev, ddsf);
1822         drbd_md_sync(mdev);
1823         put_ldev(mdev);
1824         if (dd == dev_size_error) {
1825                 retcode = ERR_NOMEM_BITMAP;
1826                 goto fail;
1827         }
1828
1829         if (mdev->state.conn == C_CONNECTED) {
1830                 if (dd == grew)
1831                         set_bit(RESIZE_PENDING, &mdev->flags);
1832
1833                 drbd_send_uuids(mdev);
1834                 drbd_send_sizes(mdev, 1, ddsf);
1835         }
1836
1837  fail:
1838         drbd_adm_finish(info, retcode);
1839         return 0;
1840 }
1841
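/* Set the syncer configuration: resync rate, sync-after dependency,
 * al-extents, checksum/verify algorithms, the resync controller fifo and
 * the CPU mask of the DRBD threads. */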
1842 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
1843 {
1844         struct drbd_conf *mdev;
1845         enum drbd_ret_code retcode;
1846         int err;
1847         int ovr; /* online verify running */
1848         int rsr; /* re-sync running */
1849         struct crypto_hash *verify_tfm = NULL;
1850         struct crypto_hash *csums_tfm = NULL;
1851         struct syncer_conf sc;
1852         cpumask_var_t new_cpu_mask;
1853         int *rs_plan_s = NULL;
1854         int fifo_size;
1855
1856         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1857         if (!adm_ctx.reply_skb)
1858                 return retcode;
1859         if (retcode != NO_ERROR)
1860                 goto fail;
1861         mdev = adm_ctx.mdev;
1862
1863         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
1864                 retcode = ERR_NOMEM;
1865                 drbd_msg_put_info("unable to allocate cpumask");
1866                 goto fail;
1867         }
1868
1869         if (((struct drbd_genlmsghdr *)info->userhdr)->flags
1870                         & DRBD_GENL_F_SET_DEFAULTS) {
1871                 memset(&sc, 0, sizeof(struct syncer_conf));
1872                 sc.rate       = DRBD_RATE_DEF;
1873                 sc.after      = DRBD_AFTER_DEF;
1874                 sc.al_extents = DRBD_AL_EXTENTS_DEF;
1875                 sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
1876                 sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
1877                 sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
1878                 sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
1879                 sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
1880                 sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
1881         } else
1882                 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
1883
1884         err = syncer_conf_from_attrs(&sc, info->attrs);
1885         if (err) {
1886                 retcode = ERR_MANDATORY_TAG;
1887                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1888                 goto fail;
1889         }
1890
1891         /* re-sync running */
1892         rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
1893                 mdev->state.conn == C_SYNC_TARGET ||
1894                 mdev->state.conn == C_PAUSED_SYNC_S ||
1895                 mdev->state.conn == C_PAUSED_SYNC_T );
1896
1897         if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
1898                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1899                 goto fail;
1900         }
1901
1902         if (!rsr && sc.csums_alg[0]) {
1903                 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
1904                 if (IS_ERR(csums_tfm)) {
1905                         csums_tfm = NULL;
1906                         retcode = ERR_CSUMS_ALG;
1907                         goto fail;
1908                 }
1909
1910                 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1911                         retcode = ERR_CSUMS_ALG_ND;
1912                         goto fail;
1913                 }
1914         }
1915
1916         /* online verify running */
1917         ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
1918
1919         if (ovr) {
1920                 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
1921                         retcode = ERR_VERIFY_RUNNING;
1922                         goto fail;
1923                 }
1924         }
1925
1926         if (!ovr && sc.verify_alg[0]) {
1927                 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
1928                 if (IS_ERR(verify_tfm)) {
1929                         verify_tfm = NULL;
1930                         retcode = ERR_VERIFY_ALG;
1931                         goto fail;
1932                 }
1933
1934                 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1935                         retcode = ERR_VERIFY_ALG_ND;
1936                         goto fail;
1937                 }
1938         }
1939
1940         /* silently ignore cpu mask on UP kernel */
1941         if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1942                 err = __bitmap_parse(sc.cpu_mask, 32, 0,
1943                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1944                 if (err) {
1945                         dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
1946                         retcode = ERR_CPU_MASK_PARSE;
1947                         goto fail;
1948                 }
1949         }
1950
1951         if (!expect(sc.rate >= 1))
1952                 sc.rate = 1;
1953
1954         /* clip to allowed range */
1955         if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
1956                 sc.al_extents = DRBD_AL_EXTENTS_MIN;
1957         if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
1958                 sc.al_extents = DRBD_AL_EXTENTS_MAX;
1959
1960         /* most sanity checks done, try to assign the new sync-after
1961          * dependency.  need to hold the global lock in there,
1962          * to avoid a race in the dependency loop check. */
1963         retcode = drbd_alter_sa(mdev, sc.after);
1964         if (retcode != NO_ERROR)
1965                 goto fail;
1966
1967         fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1968         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1969                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1970                 if (!rs_plan_s) {
1971                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1972                         retcode = ERR_NOMEM;
1973                         goto fail;
1974                 }
1975         }
1976
1977         /* ok, assign the rest of it as well.
1978          * lock against receive_SyncParam() */
1979         spin_lock(&mdev->peer_seq_lock);
1980         mdev->sync_conf = sc;
1981
1982         if (!rsr) {
1983                 crypto_free_hash(mdev->csums_tfm);
1984                 mdev->csums_tfm = csums_tfm;
1985                 csums_tfm = NULL;
1986         }
1987
1988         if (!ovr) {
1989                 crypto_free_hash(mdev->verify_tfm);
1990                 mdev->verify_tfm = verify_tfm;
1991                 verify_tfm = NULL;
1992         }
1993
1994         if (fifo_size != mdev->rs_plan_s.size) {
1995                 kfree(mdev->rs_plan_s.values);
1996                 mdev->rs_plan_s.values = rs_plan_s;
1997                 mdev->rs_plan_s.size   = fifo_size;
1998                 mdev->rs_planed = 0;
1999                 rs_plan_s = NULL;
2000         }
2001
2002         spin_unlock(&mdev->peer_seq_lock);
2003
2004         if (get_ldev(mdev)) {
2005                 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
2006                 drbd_al_shrink(mdev);
2007                 err = drbd_check_al_size(mdev);
2008                 lc_unlock(mdev->act_log);
2009                 wake_up(&mdev->al_wait);
2010
2011                 put_ldev(mdev);
2012                 drbd_md_sync(mdev);
2013
2014                 if (err) {
2015                         retcode = ERR_NOMEM;
2016                         goto fail;
2017                 }
2018         }
2019
2020         if (mdev->state.conn >= C_CONNECTED)
2021                 drbd_send_sync_param(mdev, &sc);
2022
2023         if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
2024                 cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
2025                 drbd_calc_cpu_mask(mdev->tconn);
2026                 mdev->tconn->receiver.reset_cpu_mask = 1;
2027                 mdev->tconn->asender.reset_cpu_mask = 1;
2028                 mdev->tconn->worker.reset_cpu_mask = 1;
2029         }
2030
2031         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
2032 fail:
2033         kfree(rs_plan_s);
2034         free_cpumask_var(new_cpu_mask);
2035         crypto_free_hash(csums_tfm);
2036         crypto_free_hash(verify_tfm);
2037
2038         drbd_adm_finish(info, retcode);
2039         return 0;
2040 }
2041
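/* Invalidate the local data: wait for pending bitmap IO, then become sync
 * target (C_STARTING_SYNC_T); without a connection, just mark the local
 * disk D_INCONSISTENT. */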
2042 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2043 {
2044         struct drbd_conf *mdev;
2045         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2046
2047         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2048         if (!adm_ctx.reply_skb)
2049                 return retcode;
2050         if (retcode != NO_ERROR)
2051                 goto out;
2052
2053         mdev = adm_ctx.mdev;
2054
2055         /* If there is still bitmap IO pending, probably because a previous
2056          * resync just finished, wait for it before requesting a new resync. */
2057         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2058
2059         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2060
2061         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2062                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2063
2064         while (retcode == SS_NEED_CONNECTION) {
2065                 spin_lock_irq(&mdev->tconn->req_lock);
2066                 if (mdev->state.conn < C_CONNECTED)
2067                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2068                 spin_unlock_irq(&mdev->tconn->req_lock);
2069
2070                 if (retcode != SS_NEED_CONNECTION)
2071                         break;
2072
2073                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2074         }
2075
2076 out:
2077         drbd_adm_finish(info, retcode);
2078         return 0;
2079 }
2080
2081 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2082 {
2083         int rv;
2084
2085         rv = drbd_bmio_set_n_write(mdev);
2086         drbd_suspend_al(mdev);
2087         return rv;
2088 }
2089
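/* Helper for admin commands that map directly onto a single state change. */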
2090 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2091                 union drbd_state mask, union drbd_state val)
2092 {
2093         enum drbd_ret_code retcode;
2094
2095         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2096         if (!adm_ctx.reply_skb)
2097                 return retcode;
2098         if (retcode != NO_ERROR)
2099                 goto out;
2100
2101         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2102 out:
2103         drbd_adm_finish(info, retcode);
2104         return 0;
2105 }
2106
2107 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2108 {
2109         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2110 }
2111
2112 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2113 {
2114         enum drbd_ret_code retcode;
2115
2116         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2117         if (!adm_ctx.reply_skb)
2118                 return retcode;
2119         if (retcode != NO_ERROR)
2120                 goto out;
2121
2122         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2123                 retcode = ERR_PAUSE_IS_SET;
2124 out:
2125         drbd_adm_finish(info, retcode);
2126         return 0;
2127 }
2128
2129 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2130 {
2131         union drbd_state s;
2132         enum drbd_ret_code retcode;
2133
2134         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2135         if (!adm_ctx.reply_skb)
2136                 return retcode;
2137         if (retcode != NO_ERROR)
2138                 goto out;
2139
2140         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2141                 s = adm_ctx.mdev->state;
2142                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2143                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2144                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2145                 } else {
2146                         retcode = ERR_PAUSE_IS_CLEAR;
2147                 }
2148         }
2149
2150 out:
2151         drbd_adm_finish(info, retcode);
2152         return 0;
2153 }
2154
2155 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2156 {
2157         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2158 }
2159
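/* Resume suspended (frozen) IO: create a new current UUID if one is
 * pending, clear all suspend reasons and restart or clean up the transfer
 * log as appropriate. */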
2160 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2161 {
2162         struct drbd_conf *mdev;
2163         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2164
2165         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2166         if (!adm_ctx.reply_skb)
2167                 return retcode;
2168         if (retcode != NO_ERROR)
2169                 goto out;
2170
2171         mdev = adm_ctx.mdev;
2172         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2173                 drbd_uuid_new_current(mdev);
2174                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2175         }
2176         drbd_suspend_io(mdev);
2177         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2178         if (retcode == SS_SUCCESS) {
2179                 if (mdev->state.conn < C_CONNECTED)
2180                         tl_clear(mdev->tconn);
2181                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2182                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2183         }
2184         drbd_resume_io(mdev);
2185
2186 out:
2187         drbd_adm_finish(info, retcode);
2188         return 0;
2189 }
2190
2191 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2192 {
2193         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2194 }
2195
2196 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2197 {
2198         struct nlattr *nla;
2199         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2200         if (!nla)
2201                 goto nla_put_failure;
2202         if (vnr != VOLUME_UNSPECIFIED)
2203                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2204         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2205         nla_nest_end(skb, nla);
2206         return 0;
2207
2208 nla_put_failure:
2209         if (nla)
2210                 nla_nest_cancel(skb, nla);
2211         return -EMSGSIZE;
2212 }
2213
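/* Fill the skb with the status of one device: its configuration context,
 * disk, net and syncer configuration (with sensitive fields excluded where
 * required) and the current state info. */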
2214 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2215                 const struct sib_info *sib)
2216 {
2217         struct state_info *si = NULL; /* for sizeof(si->member); */
2218         struct nlattr *nla;
2219         int got_ldev;
2220         int got_net;
2221         int err = 0;
2222         int exclude_sensitive;
2223
2224         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2225          * to.  So we had better exclude sensitive information.
2226          *
2227          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2228          * in the context of the requesting user process. Exclude sensitive
2229          * information, unless current has CAP_SYS_ADMIN.
2230          *
2231          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2232          * relies on the current implementation of netlink_dump(), which
2233          * executes the dump callback successively from netlink_recvmsg(),
2234          * always in the context of the receiving process */
2235         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2236
2237         got_ldev = get_ldev(mdev);
2238         got_net = get_net_conf(mdev->tconn);
2239
2240         /* We still need to add the connection name and volume number information.
2241          * The minor number is in drbd_genlmsghdr. */
2242         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2243                 goto nla_put_failure;
2244
2245         if (got_ldev)
2246                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2247                         goto nla_put_failure;
2248         if (got_net)
2249                 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2250                         goto nla_put_failure;
2251
2252         if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
2253                         goto nla_put_failure;
2254
2255         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2256         if (!nla)
2257                 goto nla_put_failure;
2258         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2259         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2260         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2261         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2262
2263         if (got_ldev) {
2264                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2265                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2266                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2267                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2268                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2269                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2270                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2271                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2272                 }
2273         }
2274
2275         if (sib) {
2276                 switch (sib->sib_reason) {
2277                 case SIB_SYNC_PROGRESS:
2278                 case SIB_GET_STATUS_REPLY:
2279                         break;
2280                 case SIB_STATE_CHANGE:
2281                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2282                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2283                         break;
2284                 case SIB_HELPER_POST:
2285                         NLA_PUT_U32(skb,
2286                                 T_helper_exit_code, sib->helper_exit_code);
2287                         /* fall through */
2288                 case SIB_HELPER_PRE:
2289                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2290                         break;
2291                 }
2292         }
2293         nla_nest_end(skb, nla);
2294
2295         if (0)
2296 nla_put_failure:
2297                 err = -EMSGSIZE;
2298         if (got_ldev)
2299                 put_ldev(mdev);
2300         if (got_net)
2301                 put_net_conf(mdev->tconn);
2302         return err;
2303 }
2304
2305 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2306 {
2307         enum drbd_ret_code retcode;
2308         int err;
2309
2310         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2311         if (!adm_ctx.reply_skb)
2312                 return retcode;
2313         if (retcode != NO_ERROR)
2314                 goto out;
2315
2316         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2317         if (err) {
2318                 nlmsg_free(adm_ctx.reply_skb);
2319                 return err;
2320         }
2321 out:
2322         drbd_adm_finish(info, retcode);
2323         return 0;
2324 }
2325
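/* netlink dump callback: report the status of all volumes of all
 * connections.  cb->args[] carries the iterator position between
 * successive calls. */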
2326 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2327 {
2328         struct drbd_conf *mdev;
2329         struct drbd_genlmsghdr *dh;
2330         struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
2331         struct drbd_tconn *tconn = NULL;
2332         struct drbd_tconn *tmp;
2333         unsigned volume = cb->args[1];
2334
2335         /* Open-coded, deferred iteration:
2336          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2337          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2338          *        ...
2339          *      }
2340          * }
2341          * where tconn is cb->args[0];
2342          * and i is cb->args[1];
2343          *
2344          * This may miss entries inserted after this dump started,
2345          * or entries deleted before they are reached.
2346          *
2347          * We need to make sure the mdev won't disappear while
2348          * we are looking at it, and revalidate our iterators
2349          * on each iteration.
2350          */
2351
2352         /* synchronize with drbd_new_tconn/drbd_free_tconn */
2353         mutex_lock(&drbd_cfg_mutex);
2354         /* synchronize with drbd_delete_device */
2355         rcu_read_lock();
2356 next_tconn:
2357         /* revalidate iterator position */
2358         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2359                 if (pos == NULL) {
2360                         /* first iteration */
2361                         pos = tmp;
2362                         tconn = pos;
2363                         break;
2364                 }
2365                 if (tmp == pos) {
2366                         tconn = pos;
2367                         break;
2368                 }
2369         }
2370         if (tconn) {
2371                 mdev = idr_get_next(&tconn->volumes, &volume);
2372                 if (!mdev) {
2373                         /* No more volumes to dump on this tconn.
2374                          * Advance tconn iterator. */
2375                         pos = list_entry(tconn->all_tconn.next,
2376                                         struct drbd_tconn, all_tconn);
2377                         /* But, did we dump any volume on this tconn yet? */
2378                         if (volume != 0) {
2379                                 tconn = NULL;
2380                                 volume = 0;
2381                                 goto next_tconn;
2382                         }
2383                 }
2384
2385                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2386                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2387                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2388                 if (!dh)
2389                         goto out;
2390
2391                 if (!mdev) {
2392                         /* this is a tconn without a single volume */
2393                         dh->minor = -1U;
2394                         dh->ret_code = NO_ERROR;
2395                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2396                                 genlmsg_cancel(skb, dh);
2397                         else
2398                                 genlmsg_end(skb, dh);
2399                         goto out;
2400                 }
2401
2402                 D_ASSERT(mdev->vnr == volume);
2403                 D_ASSERT(mdev->tconn == tconn);
2404
2405                 dh->minor = mdev_to_minor(mdev);
2406                 dh->ret_code = NO_ERROR;
2407
2408                 if (nla_put_status_info(skb, mdev, NULL)) {
2409                         genlmsg_cancel(skb, dh);
2410                         goto out;
2411                 }
2412                 genlmsg_end(skb, dh);
2413         }
2414
2415 out:
2416         rcu_read_unlock();
2417         mutex_unlock(&drbd_cfg_mutex);
2418         /* where to start the next iteration */
2419         cb->args[0] = (long)pos;
2420         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2421
2422         /* No more tconns/volumes/minors found results in an empty skb.
2423          * Which will terminate the dump. */
2424         return skb->len;
2425 }
2426
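/* Report which wait-for-connection timeout applies to this device:
 * peer outdated, degraded, or the default. */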
2427 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2428 {
2429         enum drbd_ret_code retcode;
2430         struct timeout_parms tp;
2431         int err;
2432
2433         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2434         if (!adm_ctx.reply_skb)
2435                 return retcode;
2436         if (retcode != NO_ERROR)
2437                 goto out;
2438
2439         tp.timeout_type =
2440                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2441                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2442                 UT_DEFAULT;
2443
2444         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2445         if (err) {
2446                 nlmsg_free(adm_ctx.reply_skb);
2447                 return err;
2448         }
2449 out:
2450         drbd_adm_finish(info, retcode);
2451         return 0;
2452 }
2453
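/* Start online verify, optionally at the start sector given in the
 * start_ov_parms; by default we resume from the last known position. */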
2454 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2455 {
2456         struct drbd_conf *mdev;
2457         enum drbd_ret_code retcode;
2458
2459         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2460         if (!adm_ctx.reply_skb)
2461                 return retcode;
2462         if (retcode != NO_ERROR)
2463                 goto out;
2464
2465         mdev = adm_ctx.mdev;
2466         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2467                 /* resume from last known position, if possible */
2468                 struct start_ov_parms parms =
2469                         { .ov_start_sector = mdev->ov_start_sector };
2470                 int err = start_ov_parms_from_attrs(&parms, info->attrs);
2471                 if (err) {
2472                         retcode = ERR_MANDATORY_TAG;
2473                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2474                         goto out;
2475                 }
2476                 /* w_make_ov_request expects position to be aligned */
2477                 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2478         }
2479         /* If there is still bitmap IO pending, e.g. a previous resync or verify
2480          * that just finished, wait for it before requesting a new resync. */
2481         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2482         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2483 out:
2484         drbd_adm_finish(info, retcode);
2485         return 0;
2486 }
2487
2488
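/* Generate a new current UUID.  With clear_bm set, also clear the bitmap;
 * on a just-created, connected device this skips the initial sync and
 * moves both disks to D_UP_TO_DATE. */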
2489 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2490 {
2491         struct drbd_conf *mdev;
2492         enum drbd_ret_code retcode;
2493         int skip_initial_sync = 0;
2494         int err;
2495         struct new_c_uuid_parms args;
2496
2497         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2498         if (!adm_ctx.reply_skb)
2499                 return retcode;
2500         if (retcode != NO_ERROR)
2501                 goto out_nolock;
2502
2503         mdev = adm_ctx.mdev;
2504         memset(&args, 0, sizeof(args));
2505         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2506                 err = new_c_uuid_parms_from_attrs(&args, info->attrs);
2507                 if (err) {
2508                         retcode = ERR_MANDATORY_TAG;
2509                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2510                         goto out_nolock;
2511                 }
2512         }
2513
2514         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2515
2516         if (!get_ldev(mdev)) {
2517                 retcode = ERR_NO_DISK;
2518                 goto out;
2519         }
2520
2521         /* this is "skip initial sync": assume the data to be clean */
2522         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2523             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2524                 dev_info(DEV, "Preparing to skip initial sync\n");
2525                 skip_initial_sync = 1;
2526         } else if (mdev->state.conn != C_STANDALONE) {
2527                 retcode = ERR_CONNECTED;
2528                 goto out_dec;
2529         }
2530
2531         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2532         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2533
2534         if (args.clear_bm) {
2535                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2536                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2537                 if (err) {
2538                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2539                         retcode = ERR_IO_MD_DISK;
2540                 }
2541                 if (skip_initial_sync) {
2542                         drbd_send_uuids_skip_initial_sync(mdev);
2543                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2544                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2545                         spin_lock_irq(&mdev->tconn->req_lock);
2546                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2547                                         CS_VERBOSE, NULL);
2548                         spin_unlock_irq(&mdev->tconn->req_lock);
2549                 }
2550         }
2551
2552         drbd_md_sync(mdev);
2553 out_dec:
2554         put_ldev(mdev);
2555 out:
2556         mutex_unlock(mdev->state_mutex);
2557 out_nolock:
2558         drbd_adm_finish(info, retcode);
2559         return 0;
2560 }
2561
2562 static enum drbd_ret_code
2563 drbd_check_conn_name(const char *name)
2564 {
2565         if (!name || !name[0]) {
2566                 drbd_msg_put_info("connection name missing");
2567                 return ERR_MANDATORY_TAG;
2568         }
2569         /* if we want to use these in sysfs/configfs/debugfs some day,
2570          * we must not allow slashes */
2571         if (strchr(name, '/')) {
2572                 drbd_msg_put_info("invalid connection name");
2573                 return ERR_INVALID_REQUEST;
2574         }
2575         return NO_ERROR;
2576 }
2577
2578 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2579 {
2580         enum drbd_ret_code retcode;
2581
2582         retcode = drbd_adm_prepare(skb, info, 0);
2583         if (!adm_ctx.reply_skb)
2584                 return retcode;
2585         if (retcode != NO_ERROR)
2586                 goto out;
2587
2588         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2589         if (retcode != NO_ERROR)
2590                 goto out;
2591
2592         if (adm_ctx.tconn) {
2593                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2594                         retcode = ERR_INVALID_REQUEST;
2595                         drbd_msg_put_info("connection exists");
2596                 }
2597                 /* else: still NO_ERROR */
2598                 goto out;
2599         }
2600
2601         if (!drbd_new_tconn(adm_ctx.conn_name))
2602                 retcode = ERR_NOMEM;
2603 out:
2604         drbd_adm_finish(info, retcode);
2605         return 0;
2606 }
2607
2608 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2609 {
2610         struct drbd_genlmsghdr *dh = info->userhdr;
2611         enum drbd_ret_code retcode;
2612
2613         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2614         if (!adm_ctx.reply_skb)
2615                 return retcode;
2616         if (retcode != NO_ERROR)
2617                 goto out;
2618
2619         /* FIXME drop minor_count parameter, limit to MINORMASK */
2620         if (dh->minor >= minor_count) {
2621                 drbd_msg_put_info("requested minor out of range");
2622                 retcode = ERR_INVALID_REQUEST;
2623                 goto out;
2624         }
2625         /* FIXME we need a define here */
2626         if (adm_ctx.volume >= 256) {
2627                 drbd_msg_put_info("requested volume id out of range");
2628                 retcode = ERR_INVALID_REQUEST;
2629                 goto out;
2630         }
2631
2632         /* drbd_adm_prepare made sure already
2633          * that mdev->tconn and mdev->vnr match the request. */
2634         if (adm_ctx.mdev) {
2635                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2636                         retcode = ERR_MINOR_EXISTS;
2637                 /* else: still NO_ERROR */
2638                 goto out;
2639         }
2640
2641         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2642 out:
2643         drbd_adm_finish(info, retcode);
2644         return 0;
2645 }
2646
2647 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2648 {
2649         struct drbd_conf *mdev;
2650         enum drbd_ret_code retcode;
2651
2652         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2653         if (!adm_ctx.reply_skb)
2654                 return retcode;
2655         if (retcode != NO_ERROR)
2656                 goto out;
2657
2658         mdev = adm_ctx.mdev;
2659         if (mdev->state.disk == D_DISKLESS &&
2660             /* no need to also require mdev->state.conn == C_STANDALONE;
2661              * we may want to delete a minor from a live replication group.
2662              */
2663             mdev->state.role == R_SECONDARY) {
2664                 drbd_delete_device(mdev_to_minor(mdev));
2665                 retcode = NO_ERROR;
2666                 /* if this was the last volume of this connection,
2667                  * this will terminate all threads */
2668                 conn_reconfig_done(adm_ctx.tconn);
2669         } else
2670                 retcode = ERR_MINOR_CONFIGURED;
2671 out:
2672         drbd_adm_finish(info, retcode);
2673         return 0;
2674 }
2675
2676 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
2677 {
2678         enum drbd_ret_code retcode;
2679
2680         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2681         if (!adm_ctx.reply_skb)
2682                 return retcode;
2683         if (retcode != NO_ERROR)
2684                 goto out;
2685
2686         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2687                 drbd_free_tconn(adm_ctx.tconn);
2688                 retcode = NO_ERROR;
2689         } else {
2690                 retcode = ERR_CONN_IN_USE;
2691         }
2692
2693 out:
2694         drbd_adm_finish(info, retcode);
2695         return 0;
2696 }
2697
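/* Broadcast the given state info (sib) for this device to the DRBD events
 * netlink multicast group. */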
2698 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
2699 {
2700         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
2701         struct sk_buff *msg;
2702         struct drbd_genlmsghdr *d_out;
2703         unsigned seq;
2704         int err = -ENOMEM;
2705
2706         seq = atomic_inc_return(&drbd_genl_seq);
2707         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
2708         if (!msg)
2709                 goto failed;
2710
2711         err = -EMSGSIZE;
2712         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
2713         if (!d_out) /* cannot happen, but anyway. */
2714                 goto nla_put_failure;
2715         d_out->minor = mdev_to_minor(mdev);
2716         d_out->ret_code = 0;
2717
2718         if (nla_put_status_info(msg, mdev, sib))
2719                 goto nla_put_failure;
2720         genlmsg_end(msg, d_out);
2721         err = drbd_genl_multicast_events(msg, 0);
2722         /* msg has been consumed or freed in netlink_broadcast() */
2723         if (err && err != -ESRCH)
2724                 goto failed;
2725
2726         return;
2727
2728 nla_put_failure:
2729         nlmsg_free(msg);
2730 failed:
2731         dev_err(DEV, "Error %d while broadcasting event. "
2732                         "Event seq:%u sib_reason:%u\n",
2733                         err, seq, sib->sib_reason);
2734 }