drivers/block/drbd/drbd_nl.c (karo-tx-linux.git)
drbd: Split drbd_alter_sa() into drbd_sync_after_valid() and drbd_sync_after_changed()
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
79
80 /* used blkdev_get_by_path, to claim our meta data device(s) */
81 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
82
83 /* Configuration is strictly serialized, because generic netlink message
84  * processing is strictly serialized by the genl_lock().
85  * Which means we can use one static global drbd_config_context struct.
86  */
87 static struct drbd_config_context {
88         /* assigned from drbd_genlmsghdr */
89         unsigned int minor;
90         /* assigned from request attributes, if present */
91         unsigned int volume;
92 #define VOLUME_UNSPECIFIED              (-1U)
93         /* pointer into the request skb,
94          * limited lifetime! */
95         char *conn_name;
96
97         /* reply buffer */
98         struct sk_buff *reply_skb;
99         /* pointer into reply buffer */
100         struct drbd_genlmsghdr *reply_dh;
101         /* resolved from attributes, if possible */
102         struct drbd_conf *mdev;
103         struct drbd_tconn *tconn;
104 } adm_ctx;
105
106 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
107 {
108         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
109         if (genlmsg_reply(skb, info))
110                 printk(KERN_ERR "drbd: error sending genl reply\n");
111 }
112
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
114  * reason it could fail is lack of space in the skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info)
116 {
117         struct sk_buff *skb = adm_ctx.reply_skb;
118         struct nlattr *nla;
119         int err = -EMSGSIZE;
120
121         if (!info || !info[0])
122                 return 0;
123
124         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
125         if (!nla)
126                 return err;
127
128         err = nla_put_string(skb, T_info_text, info);
129         if (err) {
130                 nla_nest_cancel(skb, nla);
131                 return err;
132         } else
133                 nla_nest_end(skb, nla);
134         return 0;
135 }
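/* Typical use: callers hand in a short human readable reason such as
 * "unknown minor" or "minor exists in different connection"; the string is
 * nested into the pending reply skb as DRBD_NLA_CFG_REPLY/T_info_text, so the
 * netlink client (e.g. drbdsetup) can report it alongside the numeric
 * ret_code.  A NULL or empty string is a no-op. */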
136
137 /* This would be a good candidate for a "pre_doit" hook,
138  * and per-family private info->pointers.
139  * But we need to stay compatible with older kernels.
140  * If it returns successfully, adm_ctx members are valid.
141  */
142 #define DRBD_ADM_NEED_MINOR     1
143 #define DRBD_ADM_NEED_CONN      2
144 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
145                 unsigned flags)
146 {
147         struct drbd_genlmsghdr *d_in = info->userhdr;
148         const u8 cmd = info->genlhdr->cmd;
149         int err;
150
151         memset(&adm_ctx, 0, sizeof(adm_ctx));
152
153         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154         if (cmd != DRBD_ADM_GET_STATUS
155         && security_netlink_recv(skb, CAP_SYS_ADMIN))
156                return -EPERM;
157
158         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
159         if (!adm_ctx.reply_skb)
160                 goto fail;
161
162         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
163                                         info, &drbd_genl_family, 0, cmd);
164         /* putting a few bytes into a fresh skb of >= 4k will always succeed,
165          * but check anyway */
166         if (!adm_ctx.reply_dh)
167                 goto fail;
168
169         adm_ctx.reply_dh->minor = d_in->minor;
170         adm_ctx.reply_dh->ret_code = NO_ERROR;
171
172         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
173                 struct nlattr *nla;
174                 /* parse and validate only */
175                 err = drbd_cfg_context_from_attrs(NULL, info);
176                 if (err)
177                         goto fail;
178
179                 /* It was present, and valid,
180                  * copy it over to the reply skb. */
181                 err = nla_put_nohdr(adm_ctx.reply_skb,
182                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
183                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
184                 if (err)
185                         goto fail;
186
187                 /* and assign stuff to the global adm_ctx */
188                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
189                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
190                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
191                 if (nla)
192                         adm_ctx.conn_name = nla_data(nla);
193         } else
194                 adm_ctx.volume = VOLUME_UNSPECIFIED;
195
196         adm_ctx.minor = d_in->minor;
197         adm_ctx.mdev = minor_to_mdev(d_in->minor);
198         adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);
199
200         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
201                 drbd_msg_put_info("unknown minor");
202                 return ERR_MINOR_INVALID;
203         }
204         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
205                 drbd_msg_put_info("unknown connection");
206                 return ERR_INVALID_REQUEST;
207         }
208
209         /* some more paranoia, if the request was over-determined */
210         if (adm_ctx.mdev && adm_ctx.tconn &&
211             adm_ctx.mdev->tconn != adm_ctx.tconn) {
212                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
213                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
214                 drbd_msg_put_info("minor exists in different connection");
215                 return ERR_INVALID_REQUEST;
216         }
217         if (adm_ctx.mdev &&
218             adm_ctx.volume != VOLUME_UNSPECIFIED &&
219             adm_ctx.volume != adm_ctx.mdev->vnr) {
220                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
221                                 adm_ctx.minor, adm_ctx.volume,
222                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
223                 drbd_msg_put_info("minor exists as different volume");
224                 return ERR_INVALID_REQUEST;
225         }
226
227         return NO_ERROR;
228
229 fail:
230         nlmsg_free(adm_ctx.reply_skb);
231         adm_ctx.reply_skb = NULL;
232         return -ENOMEM;
233 }
234
235 static int drbd_adm_finish(struct genl_info *info, int retcode)
236 {
237         struct nlattr *nla;
238         const char *conn_name = NULL;
239
240         if (adm_ctx.tconn) {
241                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
242                 adm_ctx.tconn = NULL;
243         }
244
245         if (!adm_ctx.reply_skb)
246                 return -ENOMEM;
247
248         adm_ctx.reply_dh->ret_code = retcode;
249
250         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
251         if (nla) {
252                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
253                 if (nla)
254                         conn_name = nla_data(nla);
255         }
256
257         drbd_adm_send_reply(adm_ctx.reply_skb, info);
258         return 0;
259 }
260
261 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
262 {
263         char *afs;
264         struct net_conf *nc;
265
266         rcu_read_lock();
267         nc = rcu_dereference(tconn->net_conf);
268         if (nc) {
269                 switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
270                 case AF_INET6:
271                         afs = "ipv6";
272                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
273                                  &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
274                         break;
275                 case AF_INET:
276                         afs = "ipv4";
277                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
278                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
279                         break;
280                 default:
281                         afs = "ssocks";
282                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
283                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
284                 }
285                 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
286         }
287         rcu_read_unlock();
288 }
289
290 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
291 {
292         char *envp[] = { "HOME=/",
293                         "TERM=linux",
294                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
295                          (char[20]) { }, /* address family */
296                          (char[60]) { }, /* address */
297                         NULL };
298         char mb[12];
299         char *argv[] = {usermode_helper, cmd, mb, NULL };
300         struct sib_info sib;
301         int ret;
302
303         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
304         setup_khelper_env(mdev->tconn, envp);
305
306         /* The helper may take some time.
307          * write out any unsynced meta data changes now */
308         drbd_md_sync(mdev);
309
310         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
311         sib.sib_reason = SIB_HELPER_PRE;
312         sib.helper_name = cmd;
313         drbd_bcast_event(mdev, &sib);
314         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
315         if (ret)
316                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
317                                 usermode_helper, cmd, mb,
318                                 (ret >> 8) & 0xff, ret);
319         else
320                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
321                                 usermode_helper, cmd, mb,
322                                 (ret >> 8) & 0xff, ret);
323         sib.sib_reason = SIB_HELPER_POST;
324         sib.helper_exit_code = ret;
325         drbd_bcast_event(mdev, &sib);
326
327         if (ret < 0) /* Ignore any ERRNOs we got. */
328                 ret = 0;
329
330         return ret;
331 }
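/* For example, a "fence-peer" call on minor 0 ends up as
 *   <usermode_helper> fence-peer minor-0
 * with HOME, TERM and PATH plus DRBD_PEER_AF / DRBD_PEER_ADDRESS (filled in by
 * setup_khelper_env() above) in the environment.  The raw return value is
 * broadcast as helper_exit_code, and the exit status ((ret >> 8) & 0xff) is
 * what gets logged. */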
332
333 static void conn_md_sync(struct drbd_tconn *tconn)
334 {
335         struct drbd_conf *mdev;
336         int vnr;
337
338         down_read(&drbd_cfg_rwsem);
339         idr_for_each_entry(&tconn->volumes, mdev, vnr)
340                 drbd_md_sync(mdev);
341         up_read(&drbd_cfg_rwsem);
342 }
343
344 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
345 {
346         char *envp[] = { "HOME=/",
347                         "TERM=linux",
348                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
349                          (char[20]) { }, /* address family */
350                          (char[60]) { }, /* address */
351                         NULL };
352         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
353         int ret;
354
355         setup_khelper_env(tconn, envp);
356         conn_md_sync(tconn);
357
358         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
359         /* TODO: conn_bcast_event() ?? */
360
361         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
362         if (ret)
363                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
364                           usermode_helper, cmd, tconn->name,
365                           (ret >> 8) & 0xff, ret);
366         else
367                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
368                           usermode_helper, cmd, tconn->name,
369                           (ret >> 8) & 0xff, ret);
370         /* TODO: conn_bcast_event() ?? */
371
372         if (ret < 0) /* Ignore any ERRNOs we got. */
373                 ret = 0;
374
375         return ret;
376 }
377
378 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
379 {
380         enum drbd_fencing_p fp = FP_NOT_AVAIL;
381         struct drbd_conf *mdev;
382         int vnr;
383
384         rcu_read_lock();
385         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
386                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
387                         fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
388                         put_ldev(mdev);
389                 }
390         }
391         rcu_read_unlock();
392
393         return fp;
394 }
395
396 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
397 {
398         union drbd_state mask = { };
399         union drbd_state val = { };
400         enum drbd_fencing_p fp;
401         char *ex_to_string;
402         int r;
403
404         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
405                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
406                 return false;
407         }
408
409         fp = highest_fencing_policy(tconn);
410         switch (fp) {
411         case FP_NOT_AVAIL:
412                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
413                 goto out;
414         case FP_DONT_CARE:
415                 return true;
416         default: ;
417         }
418
419         r = conn_khelper(tconn, "fence-peer");
420
421         switch ((r>>8) & 0xff) {
422         case 3: /* peer is inconsistent */
423                 ex_to_string = "peer is inconsistent or worse";
424                 mask.pdsk = D_MASK;
425                 val.pdsk = D_INCONSISTENT;
426                 break;
427         case 4: /* peer got outdated, or was already outdated */
428                 ex_to_string = "peer was fenced";
429                 mask.pdsk = D_MASK;
430                 val.pdsk = D_OUTDATED;
431                 break;
432         case 5: /* peer was down */
433                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
434                         /* we will(have) create(d) a new UUID anyways... */
435                         ex_to_string = "peer is unreachable, assumed to be dead";
436                         mask.pdsk = D_MASK;
437                         val.pdsk = D_OUTDATED;
438                 } else {
439                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
440                 }
441                 break;
442         case 6: /* Peer is primary, voluntarily outdate myself.
443                  * This is useful when an unconnected R_SECONDARY is asked to
444                  * become R_PRIMARY, but finds the other peer being active. */
445                 ex_to_string = "peer is active";
446                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
447                 mask.disk = D_MASK;
448                 val.disk = D_OUTDATED;
449                 break;
450         case 7:
451                 if (fp != FP_STONITH)
452                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
453                 ex_to_string = "peer was stonithed";
454                 mask.pdsk = D_MASK;
455                 val.pdsk = D_OUTDATED;
456                 break;
457         default:
458                 /* The script is broken ... */
459                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
460                 return false; /* Eventually leave IO frozen */
461         }
462
463         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
464                   (r>>8) & 0xff, ex_to_string);
465
466  out:
467
468         /* Not using
469            conn_request_state(tconn, mask, val, CS_VERBOSE);
470            here, because we might have been able to re-establish the connection in the
471            meantime. */
472         spin_lock_irq(&tconn->req_lock);
473         if (tconn->cstate < C_WF_REPORT_PARAMS)
474                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
475         spin_unlock_irq(&tconn->req_lock);
476
477         return conn_highest_pdsk(tconn) <= D_OUTDATED;
478 }
479
480 static int _try_outdate_peer_async(void *data)
481 {
482         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
483
484         conn_try_outdate_peer(tconn);
485
486         kref_put(&tconn->kref, &conn_destroy);
487         return 0;
488 }
489
490 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
491 {
492         struct task_struct *opa;
493
494         kref_get(&tconn->kref);
495         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
496         if (IS_ERR(opa)) {
497                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
498                 kref_put(&tconn->kref, &conn_destroy);
499         }
500 }
501
502 enum drbd_state_rv
503 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
504 {
505         const int max_tries = 4;
506         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
507         struct net_conf *nc;
508         int try = 0;
509         int forced = 0;
510         union drbd_state mask, val;
511
512         if (new_role == R_PRIMARY)
513                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
514
515         mutex_lock(mdev->state_mutex);
516
517         mask.i = 0; mask.role = R_MASK;
518         val.i  = 0; val.role  = new_role;
519
520         while (try++ < max_tries) {
521                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
522
523                 /* in case we first succeeded to outdate,
524                  * but now suddenly could establish a connection */
525                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
526                         val.pdsk = 0;
527                         mask.pdsk = 0;
528                         continue;
529                 }
530
531                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
532                     (mdev->state.disk < D_UP_TO_DATE &&
533                      mdev->state.disk >= D_INCONSISTENT)) {
534                         mask.disk = D_MASK;
535                         val.disk  = D_UP_TO_DATE;
536                         forced = 1;
537                         continue;
538                 }
539
540                 if (rv == SS_NO_UP_TO_DATE_DISK &&
541                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
542                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
543
544                         if (conn_try_outdate_peer(mdev->tconn)) {
545                                 val.disk = D_UP_TO_DATE;
546                                 mask.disk = D_MASK;
547                         }
548                         continue;
549                 }
550
551                 if (rv == SS_NOTHING_TO_DO)
552                         goto out;
553                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
554                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
555                                 dev_warn(DEV, "Forced into split brain situation!\n");
556                                 mask.pdsk = D_MASK;
557                                 val.pdsk  = D_OUTDATED;
558
559                         }
560                         continue;
561                 }
562                 if (rv == SS_TWO_PRIMARIES) {
563                         /* Maybe the peer is detected as dead very soon...
564                            retry at most once more in this case. */
565                         int timeo;
566                         rcu_read_lock();
567                         nc = rcu_dereference(mdev->tconn->net_conf);
568                         timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
569                         rcu_read_unlock();
570                         schedule_timeout_interruptible(timeo);
571                         if (try < max_tries)
572                                 try = max_tries - 1;
573                         continue;
574                 }
575                 if (rv < SS_SUCCESS) {
576                         rv = _drbd_request_state(mdev, mask, val,
577                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
578                         if (rv < SS_SUCCESS)
579                                 goto out;
580                 }
581                 break;
582         }
583
584         if (rv < SS_SUCCESS)
585                 goto out;
586
587         if (forced)
588                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
589
590         /* Wait until nothing is on the fly :) */
591         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
592
593         if (new_role == R_SECONDARY) {
594                 set_disk_ro(mdev->vdisk, true);
595                 if (get_ldev(mdev)) {
596                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
597                         put_ldev(mdev);
598                 }
599         } else {
600                 mutex_lock(&mdev->tconn->conf_update);
601                 nc = mdev->tconn->net_conf;
602                 if (nc)
603                         nc->want_lose = 0; /* without copy; single bit op is atomic */
604                 mutex_unlock(&mdev->tconn->conf_update);
605
606                 set_disk_ro(mdev->vdisk, false);
607                 if (get_ldev(mdev)) {
608                         if (((mdev->state.conn < C_CONNECTED ||
609                                mdev->state.pdsk <= D_FAILED)
610                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
611                                 drbd_uuid_new_current(mdev);
612
613                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
614                         put_ldev(mdev);
615                 }
616         }
617
618         /* writeout of activity-log-covered areas of the bitmap
619          * to stable storage is already done in the after-state-change handler */
620
621         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
622                 /* if this was forced, we should consider sync */
623                 if (forced)
624                         drbd_send_uuids(mdev);
625                 drbd_send_state(mdev);
626         }
627
628         drbd_md_sync(mdev);
629
630         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
631 out:
632         mutex_unlock(mdev->state_mutex);
633         return rv;
634 }
635
636 static const char *from_attrs_err_to_txt(int err)
637 {
638         return  err == -ENOMSG ? "required attribute missing" :
639                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
640                 err == -EEXIST ? "can not change invariant setting" :
641                 "invalid attribute value";
642 }
643
644 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
645 {
646         struct set_role_parms parms;
647         int err;
648         enum drbd_ret_code retcode;
649
650         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
651         if (!adm_ctx.reply_skb)
652                 return retcode;
653         if (retcode != NO_ERROR)
654                 goto out;
655
656         memset(&parms, 0, sizeof(parms));
657         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
658                 err = set_role_parms_from_attrs(&parms, info);
659                 if (err) {
660                         retcode = ERR_MANDATORY_TAG;
661                         drbd_msg_put_info(from_attrs_err_to_txt(err));
662                         goto out;
663                 }
664         }
665
666         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
667                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
668         else
669                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
670 out:
671         drbd_adm_finish(info, retcode);
672         return 0;
673 }
674
675 /* initializes the md.*_offset members, so we are able to find
676  * the on disk meta data */
677 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
678                                        struct drbd_backing_dev *bdev)
679 {
680         sector_t md_size_sect = 0;
681         switch (bdev->dc.meta_dev_idx) {
682         default:
683                 /* v07 style fixed size indexed meta data */
684                 bdev->md.md_size_sect = MD_RESERVED_SECT;
685                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
686                 bdev->md.al_offset = MD_AL_OFFSET;
687                 bdev->md.bm_offset = MD_BM_OFFSET;
688                 break;
689         case DRBD_MD_INDEX_FLEX_EXT:
690                 /* just occupy the full device; unit: sectors */
691                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
692                 bdev->md.md_offset = 0;
693                 bdev->md.al_offset = MD_AL_OFFSET;
694                 bdev->md.bm_offset = MD_BM_OFFSET;
695                 break;
696         case DRBD_MD_INDEX_INTERNAL:
697         case DRBD_MD_INDEX_FLEX_INT:
698                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
699                 /* al size is still fixed */
700                 bdev->md.al_offset = -MD_AL_SECTORS;
701                 /* we need (slightly less than) ~ this many bitmap sectors: */
702                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
703                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
704                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
705                 md_size_sect = ALIGN(md_size_sect, 8);
706
707                 /* plus the "drbd meta data super block",
708                  * and the activity log; */
709                 md_size_sect += MD_BM_OFFSET;
710
711                 bdev->md.md_size_sect = md_size_sect;
712                 /* bitmap offset is adjusted by 'super' block size */
713                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
714                 break;
715         }
716 }
717
718 /* input size is expected to be in KB */
719 char *ppsize(char *buf, unsigned long long size)
720 {
721         /* Needs 9 bytes at max including trailing NUL:
722          * -1ULL ==> "16384 EB" */
723         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
724         int base = 0;
725         while (size >= 10000 && base < sizeof(units)-1) {
726                 /* shift + round */
727                 size = (size >> 10) + !!(size & (1<<9));
728                 base++;
729         }
730         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
731
732         return buf;
733 }
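/* Worked example: ppsize(buf, 4194304) (4 GiB expressed in KB) shifts once,
 * 4194304 >> 10 == 4096 with no round-up bit set, and the loop stops since
 * 4096 < 10000, yielding "4096 MB". */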
734
735 /* there is still a theoretical deadlock when called from receiver
736  * on a D_INCONSISTENT R_PRIMARY:
737  *  remote READ does inc_ap_bio, receiver would need to receive answer
738  *  packet from remote to dec_ap_bio again.
739  *  receiver receive_sizes(), comes here,
740  *  waits for ap_bio_cnt == 0. -> deadlock.
741  * but this cannot happen, actually, because:
742  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
743  *  (not connected, or bad/no disk on peer):
744  *  see drbd_fail_request_early, ap_bio_cnt is zero.
745  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
746  *  peer may not initiate a resize.
747  */
748 /* Note these are not to be confused with
749  * drbd_adm_suspend_io/drbd_adm_resume_io,
750  * which are (sub) state changes triggered by admin (drbdsetup),
751  * and can be long lived.
752  * This changes an mdev->flag, is triggered by drbd internals,
753  * and should be short-lived. */
754 void drbd_suspend_io(struct drbd_conf *mdev)
755 {
756         set_bit(SUSPEND_IO, &mdev->flags);
757         if (drbd_suspended(mdev))
758                 return;
759         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
760 }
761
762 void drbd_resume_io(struct drbd_conf *mdev)
763 {
764         clear_bit(SUSPEND_IO, &mdev->flags);
765         wake_up(&mdev->misc_wait);
766 }
767
768 /**
769  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
770  * @mdev:       DRBD device.
771  *
772  * Returns 0 on success, negative return values indicate errors.
773  * You should call drbd_md_sync() after calling this function.
774  */
775 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
776 {
777         sector_t prev_first_sect, prev_size; /* previous meta location */
778         sector_t la_size, u_size;
779         sector_t size;
780         char ppb[10];
781
782         int md_moved, la_size_changed;
783         enum determine_dev_size rv = unchanged;
784
785         /* race:
786          * application request passes inc_ap_bio,
787          * but then cannot get an AL-reference.
788          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
789          *
790          * to avoid that:
791          * Suspend IO right here.
792          * still lock the act_log to not trigger ASSERTs there.
793          */
794         drbd_suspend_io(mdev);
795
796         /* no wait necessary anymore, actually we could assert that */
797         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
798
799         prev_first_sect = drbd_md_first_sector(mdev->ldev);
800         prev_size = mdev->ldev->md.md_size_sect;
801         la_size = mdev->ldev->md.la_size_sect;
802
803         /* TODO: should only be some assert here, not (re)init... */
804         drbd_md_set_sector_offsets(mdev, mdev->ldev);
805
806         u_size = mdev->ldev->dc.disk_size;
807         size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
808
809         if (drbd_get_capacity(mdev->this_bdev) != size ||
810             drbd_bm_capacity(mdev) != size) {
811                 int err;
812                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
813                 if (unlikely(err)) {
814                         /* currently there is only one error: ENOMEM! */
815                         size = drbd_bm_capacity(mdev)>>1;
816                         if (size == 0) {
817                                 dev_err(DEV, "OUT OF MEMORY! "
818                                     "Could not allocate bitmap!\n");
819                         } else {
820                                 dev_err(DEV, "BM resizing failed. "
821                                     "Leaving size unchanged at size = %lu KB\n",
822                                     (unsigned long)size);
823                         }
824                         rv = dev_size_error;
825                 }
826                 /* racy, see comments above. */
827                 drbd_set_my_capacity(mdev, size);
828                 mdev->ldev->md.la_size_sect = size;
829                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
830                      (unsigned long long)size>>1);
831         }
832         if (rv == dev_size_error)
833                 goto out;
834
835         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
836
837         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
838                 || prev_size       != mdev->ldev->md.md_size_sect;
839
840         if (la_size_changed || md_moved) {
841                 int err;
842
843                 drbd_al_shrink(mdev); /* All extents inactive. */
844                 dev_info(DEV, "Writing the whole bitmap, %s\n",
845                          la_size_changed && md_moved ? "size changed and md moved" :
846                          la_size_changed ? "size changed" : "md moved");
847                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
848                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
849                                 "size changed", BM_LOCKED_MASK);
850                 if (err) {
851                         rv = dev_size_error;
852                         goto out;
853                 }
854                 drbd_md_mark_dirty(mdev);
855         }
856
857         if (size > la_size)
858                 rv = grew;
859         if (size < la_size)
860                 rv = shrunk;
861 out:
862         lc_unlock(mdev->act_log);
863         wake_up(&mdev->al_wait);
864         drbd_resume_io(mdev);
865
866         return rv;
867 }
868
869 sector_t
870 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
871                   sector_t u_size, int assume_peer_has_space)
872 {
873         sector_t p_size = mdev->p_size;   /* partner's disk size. */
874         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
875         sector_t m_size; /* my size */
876         sector_t size = 0;
877
878         m_size = drbd_get_max_capacity(bdev);
879
880         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
881                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
882                 p_size = m_size;
883         }
884
885         if (p_size && m_size) {
886                 size = min_t(sector_t, p_size, m_size);
887         } else {
888                 if (la_size) {
889                         size = la_size;
890                         if (m_size && m_size < size)
891                                 size = m_size;
892                         if (p_size && p_size < size)
893                                 size = p_size;
894                 } else {
895                         if (m_size)
896                                 size = m_size;
897                         if (p_size)
898                                 size = p_size;
899                 }
900         }
901
902         if (size == 0)
903                 dev_err(DEV, "Both nodes diskless!\n");
904
905         if (u_size) {
906                 if (u_size > size)
907                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
908                             (unsigned long)u_size>>1, (unsigned long)size>>1);
909                 else
910                         size = u_size;
911         }
912
913         return size;
914 }
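/* Size selection in short: if both the peer size (p_size) and the local size
 * (m_size) are known, the smaller of the two wins; otherwise fall back to the
 * last agreed size, clipped to whatever is known.  A user-configured disk_size
 * (u_size) can only shrink the result, never grow it.  E.g. p_size=2097152,
 * m_size=4194304, u_size=0 gives 2097152 sectors; setting u_size=1048576
 * gives 1048576. */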
915
916 /**
917  * drbd_check_al_size() - Ensures that the AL is of the right size
918  * @mdev:       DRBD device.
919  *
920  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
921  * failed, and 0 on success. You should call drbd_md_sync() after you called
922  * this function.
923  */
924 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
925 {
926         struct lru_cache *n, *t;
927         struct lc_element *e;
928         unsigned int in_use;
929         int i;
930
931         if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
932                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
933
934         if (mdev->act_log &&
935             mdev->act_log->nr_elements == dc->al_extents)
936                 return 0;
937
938         in_use = 0;
939         t = mdev->act_log;
940         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
941                 dc->al_extents, sizeof(struct lc_element), 0);
942
943         if (n == NULL) {
944                 dev_err(DEV, "Cannot allocate act_log lru!\n");
945                 return -ENOMEM;
946         }
947         spin_lock_irq(&mdev->al_lock);
948         if (t) {
949                 for (i = 0; i < t->nr_elements; i++) {
950                         e = lc_element_by_index(t, i);
951                         if (e->refcnt)
952                                 dev_err(DEV, "refcnt(%d)==%d\n",
953                                     e->lc_number, e->refcnt);
954                         in_use += e->refcnt;
955                 }
956         }
957         if (!in_use)
958                 mdev->act_log = n;
959         spin_unlock_irq(&mdev->al_lock);
960         if (in_use) {
961                 dev_err(DEV, "Activity log still in use!\n");
962                 lc_destroy(n);
963                 return -EBUSY;
964         } else {
965                 if (t)
966                         lc_destroy(t);
967         }
968         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
969         return 0;
970 }
971
972 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
973 {
974         struct request_queue * const q = mdev->rq_queue;
975         int max_hw_sectors = max_bio_size >> 9;
976         int max_segments = 0;
977
978         if (get_ldev_if_state(mdev, D_ATTACHING)) {
979                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
980
981                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
982                 max_segments = mdev->ldev->dc.max_bio_bvecs;
983                 put_ldev(mdev);
984         }
985
986         blk_queue_logical_block_size(q, 512);
987         blk_queue_max_hw_sectors(q, max_hw_sectors);
988         /* This is the workaround for "bio would need to, but cannot, be split" */
989         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
990         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
991
992         if (get_ldev_if_state(mdev, D_ATTACHING)) {
993                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
994
995                 blk_queue_stack_limits(q, b);
996
997                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
998                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
999                                  q->backing_dev_info.ra_pages,
1000                                  b->backing_dev_info.ra_pages);
1001                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1002                 }
1003                 put_ldev(mdev);
1004         }
1005 }
1006
1007 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
1008 {
1009         int now, new, local, peer;
1010
1011         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
1012         local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
1013         peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
1014
1015         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1016                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1017                 mdev->local_max_bio_size = local;
1018                 put_ldev(mdev);
1019         }
1020
1021         /* We may ignore peer limits if the peer is modern enough.
1022            Starting with drbd 8.3.8 the peer can use multiple
1023            BIOs for a single peer_request */
1024         if (mdev->state.conn >= C_CONNECTED) {
1025                 if (mdev->tconn->agreed_pro_version < 94)
1026                         peer = mdev->peer_max_bio_size;
1027                 else if (mdev->tconn->agreed_pro_version == 94)
1028                         peer = DRBD_MAX_SIZE_H80_PACKET;
1029                 else /* drbd 8.3.8 onwards */
1030                         peer = DRBD_MAX_BIO_SIZE;
1031         }
1032
1033         new = min_t(int, local, peer);
1034
1035         if (mdev->state.role == R_PRIMARY && new < now)
1036                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
1037
1038         if (new != now)
1039                 dev_info(DEV, "max BIO size = %u\n", new);
1040
1041         drbd_setup_queue_param(mdev, new);
1042 }
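/* The effective limit is min(local backing queue limit, peer limit).  While
 * connected, a peer speaking exactly protocol 94 is capped at
 * DRBD_MAX_SIZE_H80_PACKET, newer peers (drbd 8.3.8 onwards) at
 * DRBD_MAX_BIO_SIZE, and older peers keep the last value learned from them;
 * the result is fed into drbd_setup_queue_param(). */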
1043
1044 /* Starts the worker thread */
1045 static void conn_reconfig_start(struct drbd_tconn *tconn)
1046 {
1047         drbd_thread_start(&tconn->worker);
1048         conn_flush_workqueue(tconn);
1049 }
1050
1051 /* if still unconfigured, stops worker again. */
1052 static void conn_reconfig_done(struct drbd_tconn *tconn)
1053 {
1054         bool stop_threads;
1055         spin_lock_irq(&tconn->req_lock);
1056         stop_threads = conn_all_vols_unconf(tconn);
1057         spin_unlock_irq(&tconn->req_lock);
1058         if (stop_threads) {
1059                 /* asender is implicitly stopped by receiver
1060                  * in drbd_disconnect() */
1061                 drbd_thread_stop(&tconn->receiver);
1062                 drbd_thread_stop(&tconn->worker);
1063         }
1064 }
1065
1066 /* Make sure IO is suspended before calling this function. */
1067 static void drbd_suspend_al(struct drbd_conf *mdev)
1068 {
1069         int s = 0;
1070
1071         if (!lc_try_lock(mdev->act_log)) {
1072                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1073                 return;
1074         }
1075
1076         drbd_al_shrink(mdev);
1077         spin_lock_irq(&mdev->tconn->req_lock);
1078         if (mdev->state.conn < C_CONNECTED)
1079                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1080         spin_unlock_irq(&mdev->tconn->req_lock);
1081         lc_unlock(mdev->act_log);
1082
1083         if (s)
1084                 dev_info(DEV, "Suspended AL updates\n");
1085 }
1086
1087
1088 static bool should_set_defaults(struct genl_info *info)
1089 {
1090         unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1091         return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1092 }
1093
1094 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1095 {
1096         enum drbd_ret_code retcode;
1097         struct drbd_conf *mdev;
1098         struct disk_conf *new_disk_conf;
1099         int err, fifo_size;
1100         int *rs_plan_s = NULL;
1101
1102         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1103         if (!adm_ctx.reply_skb)
1104                 return retcode;
1105         if (retcode != NO_ERROR)
1106                 goto out;
1107
1108         mdev = adm_ctx.mdev;
1109
1110         /* we also need a disk
1111          * to change the options on */
1112         if (!get_ldev(mdev)) {
1113                 retcode = ERR_NO_DISK;
1114                 goto out;
1115         }
1116
1117 /* FIXME freeze IO, cluster wide.
1118  *
1119  * We should make sure no-one uses
1120  * some half-updated struct when we
1121  * assign it later. */
1122
1123         new_disk_conf = kmalloc(sizeof(*new_disk_conf), GFP_KERNEL);
1124         if (!new_disk_conf) {
1125                 retcode = ERR_NOMEM;
1126                 goto fail;
1127         }
1128
1129         memcpy(new_disk_conf, &mdev->ldev->dc, sizeof(*new_disk_conf));
1130         if (should_set_defaults(info))
1131                 set_disk_conf_defaults(new_disk_conf);
1132
1133         err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1134         if (err) {
1135                 retcode = ERR_MANDATORY_TAG;
1136                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1137         }
1138
1139         if (!expect(new_disk_conf->resync_rate >= 1))
1140                 new_disk_conf->resync_rate = 1;
1141
1142         /* clip to allowed range */
1143         if (!expect(new_disk_conf->al_extents >= DRBD_AL_EXTENTS_MIN))
1144                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1145         if (!expect(new_disk_conf->al_extents <= DRBD_AL_EXTENTS_MAX))
1146                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MAX;
1147
1148         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1149         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1150                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1151                 if (!rs_plan_s) {
1152                         dev_err(DEV, "kmalloc of fifo_buffer failed");
1153                         retcode = ERR_NOMEM;
1154                         goto fail;
1155                 }
1156         }
1157
1158         if (fifo_size != mdev->rs_plan_s.size) {
1159                 kfree(mdev->rs_plan_s.values);
1160                 mdev->rs_plan_s.values = rs_plan_s;
1161                 mdev->rs_plan_s.size   = fifo_size;
1162                 mdev->rs_planed = 0;
1163                 rs_plan_s = NULL;
1164         }
1165
1166         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1167         drbd_al_shrink(mdev);
1168         err = drbd_check_al_size(mdev, new_disk_conf);
1169         lc_unlock(mdev->act_log);
1170         wake_up(&mdev->al_wait);
1171
1172         if (err) {
1173                 retcode = ERR_NOMEM;
1174                 goto fail;
1175         }
1176
1177         /* FIXME
1178          * To avoid someone looking at a half-updated struct, we probably
1179          * should have a rw-semaphore on net_conf and disk_conf.
1180          */
1181         write_lock_irq(&global_state_lock);
1182         retcode = drbd_sync_after_valid(mdev, new_disk_conf->resync_after);
1183         if (retcode == NO_ERROR) {
1184                 mdev->ldev->dc = *new_disk_conf;
1185                 drbd_sync_after_changed(mdev);
1186         }
1187         write_unlock_irq(&global_state_lock);
1188
1189         drbd_md_sync(mdev);
1190
1191
1192         if (mdev->state.conn >= C_CONNECTED)
1193                 drbd_send_sync_param(mdev);
1194
1195  fail:
1196         put_ldev(mdev);
1197         kfree(new_disk_conf);
1198         kfree(rs_plan_s);
1199  out:
1200         drbd_adm_finish(info, retcode);
1201         return 0;
1202 }
1203
1204 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1205 {
1206         struct drbd_conf *mdev;
1207         int err;
1208         enum drbd_ret_code retcode;
1209         enum determine_dev_size dd;
1210         sector_t max_possible_sectors;
1211         sector_t min_md_device_sectors;
1212         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1213         struct block_device *bdev;
1214         struct lru_cache *resync_lru = NULL;
1215         union drbd_state ns, os;
1216         enum drbd_state_rv rv;
1217         struct net_conf *nc;
1218         int cp_discovered = 0;
1219
1220         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1221         if (!adm_ctx.reply_skb)
1222                 return retcode;
1223         if (retcode != NO_ERROR)
1224                 goto finish;
1225
1226         mdev = adm_ctx.mdev;
1227         conn_reconfig_start(mdev->tconn);
1228
1229         /* if you want to reconfigure, please tear down first */
1230         if (mdev->state.disk > D_DISKLESS) {
1231                 retcode = ERR_DISK_CONFIGURED;
1232                 goto fail;
1233         }
1234         /* It may just now have detached because of IO error.  Make sure
1235          * drbd_ldev_destroy is done already, we may end up here very fast,
1236          * e.g. if someone calls attach from the on-io-error handler,
1237          * to realize a "hot spare" feature (not that I'd recommend that) */
1238         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1239
1240         /* allocation not in the IO path, drbdsetup context */
1241         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1242         if (!nbc) {
1243                 retcode = ERR_NOMEM;
1244                 goto fail;
1245         }
1246
1247         set_disk_conf_defaults(&nbc->dc);
1248
1249         err = disk_conf_from_attrs(&nbc->dc, info);
1250         if (err) {
1251                 retcode = ERR_MANDATORY_TAG;
1252                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1253                 goto fail;
1254         }
1255
1256         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1257                 retcode = ERR_MD_IDX_INVALID;
1258                 goto fail;
1259         }
1260
1261         rcu_read_lock();
1262         nc = rcu_dereference(mdev->tconn->net_conf);
1263         if (nc) {
1264                 if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1265                         rcu_read_unlock();
1266                         retcode = ERR_STONITH_AND_PROT_A;
1267                         goto fail;
1268                 }
1269         }
1270         rcu_read_unlock();
1271
1272         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1273                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1274         if (IS_ERR(bdev)) {
1275                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1276                         PTR_ERR(bdev));
1277                 retcode = ERR_OPEN_DISK;
1278                 goto fail;
1279         }
1280         nbc->backing_bdev = bdev;
1281
1282         /*
1283          * meta_dev_idx >= 0: external fixed size, possibly multiple
1284          * drbd sharing one meta device.  TODO in that case, paranoia
1285          * check that [md_bdev, meta_dev_idx] is not yet used by some
1286          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1287          * should check it for you already; but if you don't, or
1288          * someone fooled it, we need to double check here)
1289          */
1290         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1291                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1292                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1293                                   (void *)mdev : (void *)drbd_m_holder);
1294         if (IS_ERR(bdev)) {
1295                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1296                         PTR_ERR(bdev));
1297                 retcode = ERR_OPEN_MD_DISK;
1298                 goto fail;
1299         }
1300         nbc->md_bdev = bdev;
1301
1302         if ((nbc->backing_bdev == nbc->md_bdev) !=
1303             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1304              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1305                 retcode = ERR_MD_IDX_INVALID;
1306                 goto fail;
1307         }
1308
1309         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1310                         1, 61, sizeof(struct bm_extent),
1311                         offsetof(struct bm_extent, lce));
1312         if (!resync_lru) {
1313                 retcode = ERR_NOMEM;
1314                 goto fail;
1315         }
1316
1317         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1318         drbd_md_set_sector_offsets(mdev, nbc);
1319
1320         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1321                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1322                         (unsigned long long) drbd_get_max_capacity(nbc),
1323                         (unsigned long long) nbc->dc.disk_size);
1324                 retcode = ERR_DISK_TO_SMALL;
1325                 goto fail;
1326         }
1327
1328         if ((int)nbc->dc.meta_dev_idx < 0) {
1329                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1330                 /* at least one MB, otherwise it does not make sense */
1331                 min_md_device_sectors = (2<<10);
1332         } else {
1333                 max_possible_sectors = DRBD_MAX_SECTORS;
1334                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1335         }
1336
1337         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1338                 retcode = ERR_MD_DISK_TO_SMALL;
1339                 dev_warn(DEV, "refusing attach: md-device too small, "
1340                      "at least %llu sectors needed for this meta-disk type\n",
1341                      (unsigned long long) min_md_device_sectors);
1342                 goto fail;
1343         }
1344
1345         /* Make sure the new disk is big enough
1346          * (we may currently be R_PRIMARY with no local disk...) */
1347         if (drbd_get_max_capacity(nbc) <
1348             drbd_get_capacity(mdev->this_bdev)) {
1349                 retcode = ERR_DISK_TO_SMALL;
1350                 goto fail;
1351         }
1352
1353         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1354
1355         if (nbc->known_size > max_possible_sectors) {
1356                 dev_warn(DEV, "==> truncating very big lower level device "
1357                         "to currently maximum possible %llu sectors <==\n",
1358                         (unsigned long long) max_possible_sectors);
1359                 if ((int)nbc->dc.meta_dev_idx >= 0)
1360                         dev_warn(DEV, "==>> using internal or flexible "
1361                                       "meta data may help <<==\n");
1362         }
1363
1364         drbd_suspend_io(mdev);
1365         /* also wait for the last barrier ack. */
1366         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1367         /* and for any other previously queued work */
1368         drbd_flush_workqueue(mdev);
1369
1370         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1371         retcode = rv;  /* FIXME: Type mismatch. */
1372         drbd_resume_io(mdev);
1373         if (rv < SS_SUCCESS)
1374                 goto fail;
1375
1376         if (!get_ldev_if_state(mdev, D_ATTACHING))
1377                 goto force_diskless;
1378
1379         drbd_md_set_sector_offsets(mdev, nbc);
1380
1381         if (!mdev->bitmap) {
1382                 if (drbd_bm_init(mdev)) {
1383                         retcode = ERR_NOMEM;
1384                         goto force_diskless_dec;
1385                 }
1386         }
1387
1388         retcode = drbd_md_read(mdev, nbc);
1389         if (retcode != NO_ERROR)
1390                 goto force_diskless_dec;
1391
1392         if (mdev->state.conn < C_CONNECTED &&
1393             mdev->state.role == R_PRIMARY &&
1394             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1395                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1396                     (unsigned long long)mdev->ed_uuid);
1397                 retcode = ERR_DATA_NOT_CURRENT;
1398                 goto force_diskless_dec;
1399         }
1400
1401         /* Since we are diskless, fix the activity log first... */
1402         if (drbd_check_al_size(mdev, &nbc->dc)) {
1403                 retcode = ERR_NOMEM;
1404                 goto force_diskless_dec;
1405         }
1406
1407         /* Prevent shrinking of consistent devices ! */
1408         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1409             drbd_new_dev_size(mdev, nbc, nbc->dc.disk_size, 0) < nbc->md.la_size_sect) {
1410                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1411                 retcode = ERR_DISK_TO_SMALL;
1412                 goto force_diskless_dec;
1413         }
1414
1415         if (!drbd_al_read_log(mdev, nbc)) {
1416                 retcode = ERR_IO_MD_DISK;
1417                 goto force_diskless_dec;
1418         }
1419
1420         /* Reset the "barriers don't work" bits here, then force meta data to
1421          * be written, to ensure we determine if barriers are supported. */
1422         if (nbc->dc.no_md_flush)
1423                 set_bit(MD_NO_FUA, &mdev->flags);
1424         else
1425                 clear_bit(MD_NO_FUA, &mdev->flags);
1426
1427         /* Point of no return reached.
1428          * Devices and memory are no longer released by error cleanup below.
1429          * now mdev takes over responsibility, and the state engine should
1430          * clean it up somewhere.  */
1431         D_ASSERT(mdev->ldev == NULL);
1432         mdev->ldev = nbc;
1433         mdev->resync = resync_lru;
1434         nbc = NULL;
1435         resync_lru = NULL;
1436
1437         mdev->write_ordering = WO_bdev_flush;
1438         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1439
1440         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1441                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1442         else
1443                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1444
1445         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1446             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
1447                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1448                 cp_discovered = 1;
1449         }
1450
1451         mdev->send_cnt = 0;
1452         mdev->recv_cnt = 0;
1453         mdev->read_cnt = 0;
1454         mdev->writ_cnt = 0;
1455
1456         drbd_reconsider_max_bio_size(mdev);
1457
1458         /* If I am currently not R_PRIMARY,
1459          * but meta data primary indicator is set,
1460          * I just now recover from a hard crash,
1461          * and have been R_PRIMARY before that crash.
1462          *
1463          * Now, if I had no connection before that crash
1464          * (have been degraded R_PRIMARY), chances are that
1465          * I won't find my peer now either.
1466          *
1467          * In that case, and _only_ in that case,
1468          * we use the degr-wfc-timeout instead of the default,
1469          * so we can automatically recover from a crash of a
1470          * degraded but active "cluster" after a certain timeout.
1471          */
1472         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1473         if (mdev->state.role != R_PRIMARY &&
1474              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1475             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1476                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1477
1478         dd = drbd_determine_dev_size(mdev, 0);
1479         if (dd == dev_size_error) {
1480                 retcode = ERR_NOMEM_BITMAP;
1481                 goto force_diskless_dec;
1482         } else if (dd == grew)
1483                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1484
1485         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1486                 dev_info(DEV, "Assuming that all blocks are out of sync "
1487                      "(aka FullSync)\n");
1488                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1489                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1490                         retcode = ERR_IO_MD_DISK;
1491                         goto force_diskless_dec;
1492                 }
1493         } else {
1494                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1495                         "read from attaching", BM_LOCKED_MASK)) {
1496                         retcode = ERR_IO_MD_DISK;
1497                         goto force_diskless_dec;
1498                 }
1499         }
1500
1501         if (cp_discovered) {
1502                 drbd_al_apply_to_bm(mdev);
1503                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1504                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1505                         retcode = ERR_IO_MD_DISK;
1506                         goto force_diskless_dec;
1507                 }
1508         }
1509
1510         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1511                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1512
1513         spin_lock_irq(&mdev->tconn->req_lock);
1514         os = drbd_read_state(mdev);
1515         ns = os;
1516         /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT disk state;
1517            otherwise investigate MDF_WAS_UP_TO_DATE:
1518            if MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
1519            otherwise into D_CONSISTENT state.
1520         */
1521         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1522                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1523                         ns.disk = D_CONSISTENT;
1524                 else
1525                         ns.disk = D_OUTDATED;
1526         } else {
1527                 ns.disk = D_INCONSISTENT;
1528         }
1529
1530         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1531                 ns.pdsk = D_OUTDATED;
1532
1533         if (ns.disk == D_CONSISTENT &&
1534             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1535                 ns.disk = D_UP_TO_DATE;
1536
1537         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1538            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1539            this point, because drbd_request_state() modifies these
1540            flags. */
1541
1542         /* In case we are C_CONNECTED postpone any decision on the new disk
1543            state after the negotiation phase. */
1544         if (mdev->state.conn == C_CONNECTED) {
1545                 mdev->new_state_tmp.i = ns.i;
1546                 ns.i = os.i;
1547                 ns.disk = D_NEGOTIATING;
1548
1549                 /* We expect to receive up-to-date UUIDs soon.
1550                    To avoid a race in receive_state, free p_uuid while
1551                    holding req_lock. I.e. atomic with the state change */
1552                 kfree(mdev->p_uuid);
1553                 mdev->p_uuid = NULL;
1554         }
1555
1556         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1557         spin_unlock_irq(&mdev->tconn->req_lock);
1558
1559         if (rv < SS_SUCCESS)
1560                 goto force_diskless_dec;
1561
1562         if (mdev->state.role == R_PRIMARY)
1563                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1564         else
1565                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1566
1567         drbd_md_mark_dirty(mdev);
1568         drbd_md_sync(mdev);
1569
1570         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1571         put_ldev(mdev);
1572         conn_reconfig_done(mdev->tconn);
1573         drbd_adm_finish(info, retcode);
1574         return 0;
1575
1576  force_diskless_dec:
1577         put_ldev(mdev);
1578  force_diskless:
1579         drbd_force_state(mdev, NS(disk, D_FAILED));
1580         drbd_md_sync(mdev);
1581  fail:
1582         conn_reconfig_done(mdev->tconn);
1583         if (nbc) {
1584                 if (nbc->backing_bdev)
1585                         blkdev_put(nbc->backing_bdev,
1586                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1587                 if (nbc->md_bdev)
1588                         blkdev_put(nbc->md_bdev,
1589                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1590                 kfree(nbc);
1591         }
1592         lc_destroy(resync_lru);
1593
1594  finish:
1595         drbd_adm_finish(info, retcode);
1596         return 0;
1597 }
1598
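/* Helper for the detach paths: request D_FAILED (which the state engine will
 * further transition towards D_DISKLESS), then wait until the disk has
 * actually left D_FAILED before resuming application IO.  The "already
 * diskless" case is mapped to SS_NOTHING_TO_DO, an interrupted wait to
 * ERR_INTR. */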
1599 static int adm_detach(struct drbd_conf *mdev)
1600 {
1601         enum drbd_state_rv retcode;
1602         int ret;
1603         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1604         retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1605         /* D_FAILED will transition to DISKLESS. */
1606         ret = wait_event_interruptible(mdev->misc_wait,
1607                         mdev->state.disk != D_FAILED);
1608         drbd_resume_io(mdev);
1609         if ((int)retcode == (int)SS_IS_DISKLESS)
1610                 retcode = SS_NOTHING_TO_DO;
1611         if (ret)
1612                 retcode = ERR_INTR;
1613         return retcode;
1614 }
1615
1616 /* Detaching the disk is a process in multiple stages.  First we need to lock
1617  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1618  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1619  * internal references as well.
1620  * Only then we have finally detached. */
1621 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1622 {
1623         enum drbd_ret_code retcode;
1624
1625         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1626         if (!adm_ctx.reply_skb)
1627                 return retcode;
1628         if (retcode != NO_ERROR)
1629                 goto out;
1630
1631         retcode = adm_detach(adm_ctx.mdev);
1632 out:
1633         drbd_adm_finish(info, retcode);
1634         return 0;
1635 }
1636
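/* Scan all volumes of this connection (under rcu_read_lock()) and report
 * whether any of them is currently a resync source/target, including the
 * paused variants.  Together with conn_ov_running() below, this is used by
 * drbd_adm_net_opts() to refuse changing csums-alg / verify-alg while the
 * corresponding transform is in use. */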
1637 static bool conn_resync_running(struct drbd_tconn *tconn)
1638 {
1639         struct drbd_conf *mdev;
1640         bool rv = false;
1641         int vnr;
1642
1643         rcu_read_lock();
1644         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1645                 if (mdev->state.conn == C_SYNC_SOURCE ||
1646                     mdev->state.conn == C_SYNC_TARGET ||
1647                     mdev->state.conn == C_PAUSED_SYNC_S ||
1648                     mdev->state.conn == C_PAUSED_SYNC_T) {
1649                         rv = true;
1650                         break;
1651                 }
1652         }
1653         rcu_read_unlock();
1654
1655         return rv;
1656 }
1657
1658 static bool conn_ov_running(struct drbd_tconn *tconn)
1659 {
1660         struct drbd_conf *mdev;
1661         bool rv = false;
1662         int vnr;
1663
1664         rcu_read_lock();
1665         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1666                 if (mdev->state.conn == C_VERIFY_S ||
1667                     mdev->state.conn == C_VERIFY_T) {
1668                         rv = true;
1669                         break;
1670                 }
1671         }
1672         rcu_read_unlock();
1673
1674         return rv;
1675 }
1676
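/* Validate a new net_conf against the current connection state and against
 * each volume's disk configuration: changing the wire protocol on an
 * established connection needs agreed protocol version 100, two-primaries
 * requires protocol C, fencing policy FP_STONITH is incompatible with
 * protocol A, discard-my-data (want_lose) is refused while we are primary,
 * and on-congestion policies other than OC_BLOCK require protocol A. */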
1677 static enum drbd_ret_code
1678 _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
1679 {
1680         struct drbd_conf *mdev;
1681         int i;
1682
1683         if (old_conf && tconn->agreed_pro_version < 100 &&
1684             tconn->cstate == C_WF_REPORT_PARAMS &&
1685             new_conf->wire_protocol != old_conf->wire_protocol)
1686                 return ERR_NEED_APV_100;
1687
1688         if (new_conf->two_primaries &&
1689             (new_conf->wire_protocol != DRBD_PROT_C))
1690                 return ERR_NOT_PROTO_C;
1691
1692         idr_for_each_entry(&tconn->volumes, mdev, i) {
1693                 if (get_ldev(mdev)) {
1694                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1695                         put_ldev(mdev);
1696                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
1697                                 return ERR_STONITH_AND_PROT_A;
1698                 }
1699                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
1700                         return ERR_DISCARD;
1701         }
1702
1703         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1704                 return ERR_CONG_NOT_PROTO_A;
1705
1706         return NO_ERROR;
1707 }
1708
1709 static enum drbd_ret_code
1710 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1711 {
1712         enum drbd_ret_code rv;
1713         struct drbd_conf *mdev;
1714         int i;
1715
1716         rcu_read_lock();
1717         rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1718         rcu_read_unlock();
1719
1720         /* tconn->volumes protected by genl_lock() here */
1721         idr_for_each_entry(&tconn->volumes, mdev, i) {
1722                 if (!mdev->bitmap) {
1723                         if (drbd_bm_init(mdev))
1724                                 return ERR_NOMEM;
1725                 }
1726         }
1727
1728         return rv;
1729 }
1730
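/* Scratch area for newly allocated crypto transforms and integrity digest
 * buffers.  They are collected here first and only handed over to the tconn
 * once all allocations have succeeded; free_crypto() releases whatever was
 * not handed over. */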
1731 struct crypto {
1732         struct crypto_hash *verify_tfm;
1733         struct crypto_hash *csums_tfm;
1734         struct crypto_hash *cram_hmac_tfm;
1735         struct crypto_hash *integrity_tfm;
1736         void *int_dig_in;
1737         void *int_dig_vv;
1738 };
1739
1740 static int
1741 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
1742 {
1743         if (!tfm_name[0])
1744                 return NO_ERROR;
1745
1746         *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1747         if (IS_ERR(*tfm)) {
1748                 *tfm = NULL;
1749                 return err_alg;
1750         }
1751
1752         return NO_ERROR;
1753 }
1754
1755 static enum drbd_ret_code
1756 alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1757 {
1758         char hmac_name[CRYPTO_MAX_ALG_NAME];
1759         enum drbd_ret_code rv;
1760         int hash_size;
1761
1762         rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1763                        ERR_CSUMS_ALG);
1764         if (rv != NO_ERROR)
1765                 return rv;
1766         rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1767                        ERR_VERIFY_ALG);
1768         if (rv != NO_ERROR)
1769                 return rv;
1770         rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1771                        ERR_INTEGRITY_ALG);
1772         if (rv != NO_ERROR)
1773                 return rv;
1774         if (new_conf->cram_hmac_alg[0] != 0) {
1775                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1776                          new_conf->cram_hmac_alg);
1777
1778                 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1779                                ERR_AUTH_ALG);
1780         }
1781         if (crypto->integrity_tfm) {
1782                 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
1783                 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1784                 if (!crypto->int_dig_in)
1785                         return ERR_NOMEM;
1786                 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1787                 if (!crypto->int_dig_vv)
1788                         return ERR_NOMEM;
1789         }
1790
1791         return rv;
1792 }
1793
1794 static void free_crypto(struct crypto *crypto)
1795 {
1796         kfree(crypto->int_dig_in);
1797         kfree(crypto->int_dig_vv);
1798         crypto_free_hash(crypto->cram_hmac_tfm);
1799         crypto_free_hash(crypto->integrity_tfm);
1800         crypto_free_hash(crypto->csums_tfm);
1801         crypto_free_hash(crypto->verify_tfm);
1802 }
1803
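/* Change the network options of an already configured connection at runtime.
 * The new net_conf is published with rcu_assign_pointer() and the old one is
 * freed only after synchronize_rcu().  The csums and verify transforms are
 * left untouched while a resync or online verify is using them; a changed
 * integrity algorithm is announced to the peer via __drbd_send_protocol(). */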
1804 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1805 {
1806         enum drbd_ret_code retcode;
1807         struct drbd_tconn *tconn;
1808         struct net_conf *old_conf, *new_conf = NULL;
1809         int err;
1810         int ovr; /* online verify running */
1811         int rsr; /* re-sync running */
1812         struct crypto crypto = { };
1813         bool change_integrity_alg;
1814
1815         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1816         if (!adm_ctx.reply_skb)
1817                 return retcode;
1818         if (retcode != NO_ERROR)
1819                 goto out;
1820
1821         tconn = adm_ctx.tconn;
1822
1823         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1824         if (!new_conf) {
1825                 retcode = ERR_NOMEM;
1826                 goto out;
1827         }
1828
1829         conn_reconfig_start(tconn);
1830
1831         mutex_lock(&tconn->data.mutex);
1832         mutex_lock(&tconn->conf_update);
1833         old_conf = tconn->net_conf;
1834
1835         if (!old_conf) {
1836                 drbd_msg_put_info("net conf missing, try connect");
1837                 retcode = ERR_INVALID_REQUEST;
1838                 goto fail;
1839         }
1840
1841         *new_conf = *old_conf;
1842         if (should_set_defaults(info))
1843                 set_net_conf_defaults(new_conf);
1844
1845         err = net_conf_from_attrs_for_change(new_conf, info);
1846         if (err) {
1847                 retcode = ERR_MANDATORY_TAG;
1848                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1849                 goto fail;
1850         }
1851
1852         retcode = check_net_options(tconn, new_conf);
1853         if (retcode != NO_ERROR)
1854                 goto fail;
1855
1856         /* re-sync running */
1857         rsr = conn_resync_running(tconn);
1858         if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
1859                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1860                 goto fail;
1861         }
1862
1863         /* online verify running */
1864         ovr = conn_ov_running(tconn);
1865         if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1866                 retcode = ERR_VERIFY_RUNNING;
1867                 goto fail;
1868         }
1869
1870         change_integrity_alg = strcmp(old_conf->integrity_alg,
1871                                       new_conf->integrity_alg);
1872
1873         retcode = alloc_crypto(&crypto, new_conf);
1874         if (retcode != NO_ERROR)
1875                 goto fail;
1876
1877         rcu_assign_pointer(tconn->net_conf, new_conf);
1878
1879         if (!rsr) {
1880                 crypto_free_hash(tconn->csums_tfm);
1881                 tconn->csums_tfm = crypto.csums_tfm;
1882                 crypto.csums_tfm = NULL;
1883         }
1884         if (!ovr) {
1885                 crypto_free_hash(tconn->verify_tfm);
1886                 tconn->verify_tfm = crypto.verify_tfm;
1887                 crypto.verify_tfm = NULL;
1888         }
1889
1890         kfree(tconn->int_dig_in);
1891         tconn->int_dig_in = crypto.int_dig_in;
1892         kfree(tconn->int_dig_vv);
1893         tconn->int_dig_vv = crypto.int_dig_vv;
1894         crypto_free_hash(tconn->integrity_tfm);
1895         tconn->integrity_tfm = crypto.integrity_tfm;
1896         if (change_integrity_alg) {
1897                 /* Do this without trying to take tconn->data.mutex again.  */
1898                 if (__drbd_send_protocol(tconn))
1899                         goto fail;
1900         }
1901
1902         /* FIXME Changing cram_hmac while the connection is established is useless */
1903         crypto_free_hash(tconn->cram_hmac_tfm);
1904         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1905
1906         mutex_unlock(&tconn->conf_update);
1907         mutex_unlock(&tconn->data.mutex);
1908         synchronize_rcu();
1909         kfree(old_conf);
1910
1911         if (tconn->cstate >= C_WF_REPORT_PARAMS)
1912                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1913
1914         goto done;
1915
1916  fail:
1917         mutex_unlock(&tconn->conf_update);
1918         mutex_unlock(&tconn->data.mutex);
1919         free_crypto(&crypto);
1920         kfree(new_conf);
1921  done:
1922         conn_reconfig_done(tconn);
1923  out:
1924         drbd_adm_finish(info, retcode);
1925         return 0;
1926 }
1927
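/* Configure the network part of a connection and move it from C_STANDALONE
 * to C_UNCONNECTED, after checking that neither the local nor the peer
 * address is already in use by another configured connection.  From
 * userspace this is typically reached through drbdsetup's connect/net
 * handling; the exact invocation is a drbd-utils detail and only mentioned
 * here as an illustration. */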
1928 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1929 {
1930         struct drbd_conf *mdev;
1931         struct net_conf *old_conf, *new_conf = NULL;
1932         struct crypto crypto = { };
1933         struct drbd_tconn *oconn;
1934         struct drbd_tconn *tconn;
1935         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1936         enum drbd_ret_code retcode;
1937         int i;
1938         int err;
1939
1940         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1941         if (!adm_ctx.reply_skb)
1942                 return retcode;
1943         if (retcode != NO_ERROR)
1944                 goto out;
1945
1946         tconn = adm_ctx.tconn;
1947         conn_reconfig_start(tconn);
1948
1949         if (tconn->cstate > C_STANDALONE) {
1950                 retcode = ERR_NET_CONFIGURED;
1951                 goto fail;
1952         }
1953
1954         /* allocation not in the IO path, cqueue thread context */
1955         new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
1956         if (!new_conf) {
1957                 retcode = ERR_NOMEM;
1958                 goto fail;
1959         }
1960
1961         set_net_conf_defaults(new_conf);
1962
1963         err = net_conf_from_attrs(new_conf, info);
1964         if (err) {
1965                 retcode = ERR_MANDATORY_TAG;
1966                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1967                 goto fail;
1968         }
1969
1970         retcode = check_net_options(tconn, new_conf);
1971         if (retcode != NO_ERROR)
1972                 goto fail;
1973
1974         retcode = NO_ERROR;
1975
1976         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1977         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1978
1979         /* No need to take drbd_cfg_rwsem here.  All reconfiguration is
1980          * strictly serialized on genl_lock(). We are protected against
1981          * concurrent reconfiguration/addition/deletion */
1982         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1983                 struct net_conf *nc;
1984                 if (oconn == tconn)
1985                         continue;
1986
1987                 rcu_read_lock();
1988                 nc = rcu_dereference(oconn->net_conf);
1989                 if (nc) {
1990                         taken_addr = (struct sockaddr *)&nc->my_addr;
1991                         if (new_conf->my_addr_len == nc->my_addr_len &&
1992                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1993                                 retcode = ERR_LOCAL_ADDR;
1994
1995                         taken_addr = (struct sockaddr *)&nc->peer_addr;
1996                         if (new_conf->peer_addr_len == nc->peer_addr_len &&
1997                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1998                                 retcode = ERR_PEER_ADDR;
1999                 }
2000                 rcu_read_unlock();
2001                 if (retcode != NO_ERROR)
2002                         goto fail;
2003         }
2004
2005         retcode = alloc_crypto(&crypto, new_conf);
2006         if (retcode != NO_ERROR)
2007                 goto fail;
2008
2009         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2010
2011         conn_flush_workqueue(tconn);
2012
2013         mutex_lock(&tconn->conf_update);
2014         old_conf = tconn->net_conf;
2015         if (old_conf) {
2016                 retcode = ERR_NET_CONFIGURED;
2017                 mutex_unlock(&tconn->conf_update);
2018                 goto fail;
2019         }
2020         rcu_assign_pointer(tconn->net_conf, new_conf);
2021
2022         conn_free_crypto(tconn);
2023         tconn->int_dig_in = crypto.int_dig_in;
2024         tconn->int_dig_vv = crypto.int_dig_vv;
2025         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2026         tconn->integrity_tfm = crypto.integrity_tfm;
2027         tconn->csums_tfm = crypto.csums_tfm;
2028         tconn->verify_tfm = crypto.verify_tfm;
2029
2030         mutex_unlock(&tconn->conf_update);
2031
2032         rcu_read_lock();
2033         idr_for_each_entry(&tconn->volumes, mdev, i) {
2034                 mdev->send_cnt = 0;
2035                 mdev->recv_cnt = 0;
2036         }
2037         rcu_read_unlock();
2038
2039         retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2040
2041         conn_reconfig_done(tconn);
2042         drbd_adm_finish(info, retcode);
2043         return 0;
2044
2045 fail:
2046         free_crypto(&crypto);
2047         kfree(new_conf);
2048
2049         conn_reconfig_done(tconn);
2050 out:
2051         drbd_adm_finish(info, retcode);
2052         return 0;
2053 }
2054
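/* Try to take the connection down to C_DISCONNECTING (CS_HARD if forced).
 * A few state-engine verdicts get special treatment: SS_PRIMARY_NOP retries
 * with the peer disk marked D_OUTDATED, SS_CW_FAILED_BY_PEER retries with
 * our own disk outdated (or falls back to a hard disconnect).  On success we
 * wait for the receiver thread to really terminate and, as a race breaker,
 * request C_STANDALONE once more. */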
2055 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2056 {
2057         enum drbd_state_rv rv;
2058
2059         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2060                         force ? CS_HARD : 0);
2061
2062         switch (rv) {
2063         case SS_NOTHING_TO_DO:
2064                 break;
2065         case SS_ALREADY_STANDALONE:
2066                 return SS_SUCCESS;
2067         case SS_PRIMARY_NOP:
2068                 /* Our state checking code wants to see the peer outdated. */
2069                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2070                                                 pdsk, D_OUTDATED), CS_VERBOSE);
2071                 break;
2072         case SS_CW_FAILED_BY_PEER:
2073                 /* The peer probably wants to see us outdated. */
2074                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2075                                                         disk, D_OUTDATED), 0);
2076                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2077                         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2078                                         CS_HARD);
2079                 }
2080                 break;
2081         default:;
2082                 /* no special handling necessary */
2083         }
2084
2085         if (rv >= SS_SUCCESS) {
2086                 enum drbd_state_rv rv2;
2087                 /* No one else can reconfigure the network while I am here.
2088                  * The state handling only uses drbd_thread_stop_nowait(),
2089                  * we want to really wait here until the receiver is no more.
2090                  */
2091                 drbd_thread_stop(&tconn->receiver);
2092
2093                 /* Race breaker.  This additional state change request may be
2094                  * necessary, if this was a forced disconnect during a receiver
2095                  * restart.  We may have "killed" the receiver thread just
2096                  * after drbdd_init() returned.  Typically, we should be
2097                  * C_STANDALONE already, now, and this becomes a no-op.
2098                  */
2099                 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2100                                 CS_VERBOSE | CS_HARD);
2101                 if (rv2 < SS_SUCCESS)
2102                         conn_err(tconn,
2103                                 "unexpected rv2=%d in conn_try_disconnect()\n",
2104                                 rv2);
2105         }
2106         return rv;
2107 }
2108
2109 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2110 {
2111         struct disconnect_parms parms;
2112         struct drbd_tconn *tconn;
2113         enum drbd_state_rv rv;
2114         enum drbd_ret_code retcode;
2115         int err;
2116
2117         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2118         if (!adm_ctx.reply_skb)
2119                 return retcode;
2120         if (retcode != NO_ERROR)
2121                 goto fail;
2122
2123         tconn = adm_ctx.tconn;
2124         memset(&parms, 0, sizeof(parms));
2125         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2126                 err = disconnect_parms_from_attrs(&parms, info);
2127                 if (err) {
2128                         retcode = ERR_MANDATORY_TAG;
2129                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2130                         goto fail;
2131                 }
2132         }
2133
2134         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2135         if (rv < SS_SUCCESS)
2136                 retcode = rv;  /* FIXME: Type mismatch. */
2137         else
2138                 retcode = NO_ERROR;
2139  fail:
2140         drbd_adm_finish(info, retcode);
2141         return 0;
2142 }
2143
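/* After an online grow while connected, decide which side becomes the sync
 * source: the current primary if the roles differ, otherwise the node that
 * has DISCARD_CONCURRENT set.  The sync source starts the resync directly,
 * the other side waits for the sync UUID (C_WF_SYNC_UUID). */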
2144 void resync_after_online_grow(struct drbd_conf *mdev)
2145 {
2146         int iass; /* I am sync source */
2147
2148         dev_info(DEV, "Resync of new storage after online grow\n");
2149         if (mdev->state.role != mdev->state.peer)
2150                 iass = (mdev->state.role == R_PRIMARY);
2151         else
2152                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2153
2154         if (iass)
2155                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2156         else
2157                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2158 }
2159
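/* Handle a resize request (roughly "drbdsetup resize" in userspace, though
 * the exact option names are a drbd-utils detail): refuse while a resync is
 * running or while both nodes are secondary, take the requested size from
 * resize_parms, re-run drbd_determine_dev_size(), and if we grew while
 * connected, send updated UUIDs and sizes to the peer. */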
2160 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2161 {
2162         struct resize_parms rs;
2163         struct drbd_conf *mdev;
2164         enum drbd_ret_code retcode;
2165         enum determine_dev_size dd;
2166         enum dds_flags ddsf;
2167         int err;
2168
2169         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2170         if (!adm_ctx.reply_skb)
2171                 return retcode;
2172         if (retcode != NO_ERROR)
2173                 goto fail;
2174
2175         memset(&rs, 0, sizeof(struct resize_parms));
2176         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2177                 err = resize_parms_from_attrs(&rs, info);
2178                 if (err) {
2179                         retcode = ERR_MANDATORY_TAG;
2180                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2181                         goto fail;
2182                 }
2183         }
2184
2185         mdev = adm_ctx.mdev;
2186         if (mdev->state.conn > C_CONNECTED) {
2187                 retcode = ERR_RESIZE_RESYNC;
2188                 goto fail;
2189         }
2190
2191         if (mdev->state.role == R_SECONDARY &&
2192             mdev->state.peer == R_SECONDARY) {
2193                 retcode = ERR_NO_PRIMARY;
2194                 goto fail;
2195         }
2196
2197         if (!get_ldev(mdev)) {
2198                 retcode = ERR_NO_DISK;
2199                 goto fail;
2200         }
2201
2202         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2203                 retcode = ERR_NEED_APV_93;
                put_ldev(mdev);  /* drop the reference taken by get_ldev() above */
2204                 goto fail;
2205         }
2206
2207         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2208                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2209
2210         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
2211         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2212         dd = drbd_determine_dev_size(mdev, ddsf);
2213         drbd_md_sync(mdev);
2214         put_ldev(mdev);
2215         if (dd == dev_size_error) {
2216                 retcode = ERR_NOMEM_BITMAP;
2217                 goto fail;
2218         }
2219
2220         if (mdev->state.conn == C_CONNECTED) {
2221                 if (dd == grew)
2222                         set_bit(RESIZE_PENDING, &mdev->flags);
2223
2224                 drbd_send_uuids(mdev);
2225                 drbd_send_sizes(mdev, 1, ddsf);
2226         }
2227
2228  fail:
2229         drbd_adm_finish(info, retcode);
2230         return 0;
2231 }
2232
2233 void drbd_set_res_opts_defaults(struct res_opts *r)
2234 {
2235         set_res_opts_defaults(r);
2236 }
2237
2238 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2239 {
2240         enum drbd_ret_code retcode;
2241         cpumask_var_t new_cpu_mask;
2242         struct drbd_tconn *tconn;
2243         struct res_opts res_opts;
2244         int err;
2245
2246         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2247         if (!adm_ctx.reply_skb)
2248                 return retcode;
2249         if (retcode != NO_ERROR)
2250                 goto fail;
2251         tconn = adm_ctx.tconn;
2252
2253         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2254                 retcode = ERR_NOMEM;
2255                 drbd_msg_put_info("unable to allocate cpumask");
2256                 goto fail;
2257         }
2258
2259         res_opts = tconn->res_opts;
2260         if (should_set_defaults(info))
2261                 set_res_opts_defaults(&res_opts);
2262
2263         err = res_opts_from_attrs(&res_opts, info);
2264         if (err) {
2265                 retcode = ERR_MANDATORY_TAG;
2266                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2267                 goto fail;
2268         }
2269
2270         /* silently ignore cpu mask on UP kernel */
2271         if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
2272                 err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
2273                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2274                 if (err) {
2275                         conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2276                         retcode = ERR_CPU_MASK_PARSE;
2277                         goto fail;
2278                 }
2279         }
2280
2281
2282         tconn->res_opts = res_opts;
2283
2284         if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2285                 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2286                 drbd_calc_cpu_mask(tconn);
2287                 tconn->receiver.reset_cpu_mask = 1;
2288                 tconn->asender.reset_cpu_mask = 1;
2289                 tconn->worker.reset_cpu_mask = 1;
2290         }
2291
2292 fail:
2293         free_cpumask_var(new_cpu_mask);
2294
2295         drbd_adm_finish(info, retcode);
2296         return 0;
2297 }
2298
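/* Start a full resync with this node as sync target: wait for any pending
 * bitmap IO, then request C_STARTING_SYNC_T.  If that fails because we are
 * not connected, fall back to marking the local disk D_INCONSISTENT
 * directly; the loop below handles the race of a connection being
 * established while we are trying to do that. */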
2299 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2300 {
2301         struct drbd_conf *mdev;
2302         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2303
2304         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2305         if (!adm_ctx.reply_skb)
2306                 return retcode;
2307         if (retcode != NO_ERROR)
2308                 goto out;
2309
2310         mdev = adm_ctx.mdev;
2311
2312         /* If there is still bitmap IO pending, probably because a previous
2313          * resync has just finished, wait for it before requesting a new resync. */
2314         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2315
2316         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2317
2318         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2319                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2320
2321         while (retcode == SS_NEED_CONNECTION) {
2322                 spin_lock_irq(&mdev->tconn->req_lock);
2323                 if (mdev->state.conn < C_CONNECTED)
2324                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2325                 spin_unlock_irq(&mdev->tconn->req_lock);
2326
2327                 if (retcode != SS_NEED_CONNECTION)
2328                         break;
2329
2330                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2331         }
2332
2333 out:
2334         drbd_adm_finish(info, retcode);
2335         return 0;
2336 }
2337
2338 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2339 {
2340         int rv;
2341
2342         rv = drbd_bmio_set_n_write(mdev);
2343         drbd_suspend_al(mdev);
2344         return rv;
2345 }
2346
2347 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2348                 union drbd_state mask, union drbd_state val)
2349 {
2350         enum drbd_ret_code retcode;
2351
2352         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2353         if (!adm_ctx.reply_skb)
2354                 return retcode;
2355         if (retcode != NO_ERROR)
2356                 goto out;
2357
2358         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2359 out:
2360         drbd_adm_finish(info, retcode);
2361         return 0;
2362 }
2363
2364 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2365 {
2366         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2367 }
2368
2369 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2370 {
2371         enum drbd_ret_code retcode;
2372
2373         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2374         if (!adm_ctx.reply_skb)
2375                 return retcode;
2376         if (retcode != NO_ERROR)
2377                 goto out;
2378
2379         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2380                 retcode = ERR_PAUSE_IS_SET;
2381 out:
2382         drbd_adm_finish(info, retcode);
2383         return 0;
2384 }
2385
2386 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2387 {
2388         union drbd_dev_state s;
2389         enum drbd_ret_code retcode;
2390
2391         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2392         if (!adm_ctx.reply_skb)
2393                 return retcode;
2394         if (retcode != NO_ERROR)
2395                 goto out;
2396
2397         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2398                 s = adm_ctx.mdev->state;
2399                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2400                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2401                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2402                 } else {
2403                         retcode = ERR_PAUSE_IS_CLEAR;
2404                 }
2405         }
2406
2407 out:
2408         drbd_adm_finish(info, retcode);
2409         return 0;
2410 }
2411
2412 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2413 {
2414         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2415 }
2416
2417 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2418 {
2419         struct drbd_conf *mdev;
2420         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2421
2422         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2423         if (!adm_ctx.reply_skb)
2424                 return retcode;
2425         if (retcode != NO_ERROR)
2426                 goto out;
2427
2428         mdev = adm_ctx.mdev;
2429         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2430                 drbd_uuid_new_current(mdev);
2431                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2432         }
2433         drbd_suspend_io(mdev);
2434         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2435         if (retcode == SS_SUCCESS) {
2436                 if (mdev->state.conn < C_CONNECTED)
2437                         tl_clear(mdev->tconn);
2438                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2439                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2440         }
2441         drbd_resume_io(mdev);
2442
2443 out:
2444         drbd_adm_finish(info, retcode);
2445         return 0;
2446 }
2447
2448 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2449 {
2450         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2451 }
2452
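/* Emit the DRBD_NLA_CFG_CONTEXT nest (connection name plus, if known, the
 * volume number) that identifies which object a netlink reply or broadcast
 * refers to.  Returns -EMSGSIZE and cancels the nest if the skb runs out of
 * room. */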
2453 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2454 {
2455         struct nlattr *nla;
2456         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2457         if (!nla)
2458                 goto nla_put_failure;
2459         if (vnr != VOLUME_UNSPECIFIED)
2460                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2461         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2462         nla_nest_end(skb, nla);
2463         return 0;
2464
2465 nla_put_failure:
2466         if (nla)
2467                 nla_nest_cancel(skb, nla);
2468         return -EMSGSIZE;
2469 }
2470
2471 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2472                 const struct sib_info *sib)
2473 {
2474         struct state_info *si = NULL; /* for sizeof(si->member); */
2475         struct net_conf *nc;
2476         struct nlattr *nla;
2477         int got_ldev;
2478         int err = 0;
2479         int exclude_sensitive;
2480
2481         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2482          * to.  So we had better exclude sensitive information.
2483          *
2484          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2485          * in the context of the requesting user process. Exclude sensitive
2486          * information, unless current has CAP_SYS_ADMIN.
2487          *
2488          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2489          * relies on the current implementation of netlink_dump(), which
2490          * executes the dump callback successively from netlink_recvmsg(),
2491          * always in the context of the receiving process */
2492         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2493
2494         got_ldev = get_ldev(mdev);
2495
2496         /* We still need to add the connection name and volume number information.
2497          * The minor number is in drbd_genlmsghdr. */
2498         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2499                 goto nla_put_failure;
2500
2501         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2502                 goto nla_put_failure;
2503
2504         if (got_ldev)
2505                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2506                         goto nla_put_failure;
2507
2508         rcu_read_lock();
2509         nc = rcu_dereference(mdev->tconn->net_conf);
2510         if (nc)
2511                 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2512         rcu_read_unlock();
2513         if (err)
2514                 goto nla_put_failure;
2515
2516         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2517         if (!nla)
2518                 goto nla_put_failure;
2519         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2520         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2521         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2522         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2523
2524         if (got_ldev) {
2525                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2526                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2527                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2528                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2529                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2530                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2531                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2532                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2533                 }
2534         }
2535
2536         if (sib) {
2537                 switch(sib->sib_reason) {
2538                 case SIB_SYNC_PROGRESS:
2539                 case SIB_GET_STATUS_REPLY:
2540                         break;
2541                 case SIB_STATE_CHANGE:
2542                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2543                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2544                         break;
2545                 case SIB_HELPER_POST:
2546                         NLA_PUT_U32(skb,
2547                                 T_helper_exit_code, sib->helper_exit_code);
2548                         /* fall through */
2549                 case SIB_HELPER_PRE:
2550                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2551                         break;
2552                 }
2553         }
2554         nla_nest_end(skb, nla);
2555
2556         if (0)
2557 nla_put_failure:
2558                 err = -EMSGSIZE;
2559         if (got_ldev)
2560                 put_ldev(mdev);
2561         return err;
2562 }
2563
2564 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2565 {
2566         enum drbd_ret_code retcode;
2567         int err;
2568
2569         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2570         if (!adm_ctx.reply_skb)
2571                 return retcode;
2572         if (retcode != NO_ERROR)
2573                 goto out;
2574
2575         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2576         if (err) {
2577                 nlmsg_free(adm_ctx.reply_skb);
2578                 return err;
2579         }
2580 out:
2581         drbd_adm_finish(info, retcode);
2582         return 0;
2583 }
2584
2585 int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2586 {
2587         struct drbd_conf *mdev;
2588         struct drbd_genlmsghdr *dh;
2589         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2590         struct drbd_tconn *tconn = NULL;
2591         struct drbd_tconn *tmp;
2592         unsigned volume = cb->args[1];
2593
2594         /* Open coded, deferred, iteration:
2595          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2596          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2597          *        ...
2598          *      }
2599          * }
2600          * where tconn is cb->args[0];
2601          * and i is cb->args[1];
2602          *
2603          * cb->args[2] indicates if we shall loop over all resources,
2604          * or just dump all volumes of a single resource.
2605          *
2606          * This may miss entries inserted after this dump started,
2607          * or entries deleted before they are reached.
2608          *
2609          * We need to make sure the mdev won't disappear while
2610          * we are looking at it, and revalidate our iterators
2611          * on each iteration.
2612          */
2613
2614         /* synchronize with conn_create()/conn_destroy() */
2615         down_read(&drbd_cfg_rwsem);
2616         /* revalidate iterator position */
2617         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2618                 if (pos == NULL) {
2619                         /* first iteration */
2620                         pos = tmp;
2621                         tconn = pos;
2622                         break;
2623                 }
2624                 if (tmp == pos) {
2625                         tconn = pos;
2626                         break;
2627                 }
2628         }
2629         if (tconn) {
2630 next_tconn:
2631                 mdev = idr_get_next(&tconn->volumes, &volume);
2632                 if (!mdev) {
2633                         /* No more volumes to dump on this tconn.
2634                          * Advance tconn iterator. */
2635                         pos = list_entry(tconn->all_tconn.next,
2636                                         struct drbd_tconn, all_tconn);
2637                         /* Did we dump any volume on this tconn yet? */
2638                         if (volume != 0) {
2639                                 /* If we reached the end of the list,
2640                                  * or only a single resource dump was requested,
2641                                  * we are done. */
2642                                 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2643                                         goto out;
2644                                 volume = 0;
2645                                 tconn = pos;
2646                                 goto next_tconn;
2647                         }
2648                 }
2649
2650                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2651                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2652                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2653                 if (!dh)
2654                         goto out;
2655
2656                 if (!mdev) {
2657                         /* this is a tconn without a single volume */
2658                         dh->minor = -1U;
2659                         dh->ret_code = NO_ERROR;
2660                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2661                                 genlmsg_cancel(skb, dh);
2662                         else
2663                                 genlmsg_end(skb, dh);
2664                         goto out;
2665                 }
2666
2667                 D_ASSERT(mdev->vnr == volume);
2668                 D_ASSERT(mdev->tconn == tconn);
2669
2670                 dh->minor = mdev_to_minor(mdev);
2671                 dh->ret_code = NO_ERROR;
2672
2673                 if (nla_put_status_info(skb, mdev, NULL)) {
2674                         genlmsg_cancel(skb, dh);
2675                         goto out;
2676                 }
2677                 genlmsg_end(skb, dh);
2678         }
2679
2680 out:
2681         up_read(&drbd_cfg_rwsem);
2682         /* where to start the next iteration */
2683         cb->args[0] = (long)pos;
2684         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2685
2686         /* No more tconns/volumes/minors found results in an empty skb.
2687          * Which will terminate the dump. */
2688         return skb->len;
2689 }
2690
2691 /*
2692  * Request status of all resources, or of all volumes within a single resource.
2693  *
2694  * This is a dump, as the answer may not fit in a single reply skb otherwise.
2695  * Which means we cannot use the family->attrbuf or other such members, because
2696  * dump is NOT protected by the genl_lock().  During dump, we only have access
2697  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2698  *
2699  * Once things are setup properly, we call into get_one_status().
2700  */
2701 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2702 {
2703         const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2704         struct nlattr *nla;
2705         const char *conn_name;
2706         struct drbd_tconn *tconn;
2707
2708         /* Is this a followup call? */
2709         if (cb->args[0]) {
2710                 /* ... of a single resource dump,
2711                  * and the resource iterator has been advanced already? */
2712                 if (cb->args[2] && cb->args[2] != cb->args[0])
2713                         return 0; /* DONE. */
2714                 goto dump;
2715         }
2716
2717         /* First call (from netlink_dump_start).  We need to figure out
2718          * which resource(s) the user wants us to dump. */
2719         nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2720                         nlmsg_attrlen(cb->nlh, hdrlen),
2721                         DRBD_NLA_CFG_CONTEXT);
2722
2723         /* No explicit context given.  Dump all. */
2724         if (!nla)
2725                 goto dump;
2726         nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
2727         /* context given, but no name present? */
2728         if (!nla)
2729                 return -EINVAL;
2730         conn_name = nla_data(nla);
2731         tconn = conn_get_by_name(conn_name);
2732
2733         if (!tconn)
2734                 return -ENODEV;
2735
2736         kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2737
2738         /* prime iterators, and set "filter" mode mark:
2739          * only dump this tconn. */
2740         cb->args[0] = (long)tconn;
2741         /* cb->args[1] = 0; passed in this way. */
2742         cb->args[2] = (long)tconn;
2743
2744 dump:
2745         return get_one_status(skb, cb);
2746 }
2747
2748 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2749 {
2750         enum drbd_ret_code retcode;
2751         struct timeout_parms tp;
2752         int err;
2753
2754         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2755         if (!adm_ctx.reply_skb)
2756                 return retcode;
2757         if (retcode != NO_ERROR)
2758                 goto out;
2759
2760         tp.timeout_type =
2761                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2762                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2763                 UT_DEFAULT;
2764
2765         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2766         if (err) {
2767                 nlmsg_free(adm_ctx.reply_skb);
2768                 return err;
2769         }
2770 out:
2771         drbd_adm_finish(info, retcode);
2772         return 0;
2773 }
2774
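/* Kick off online verify (C_VERIFY_S) on this minor.  An optional start
 * sector allows resuming a previously interrupted verify run; it is aligned
 * down to bitmap-bit granularity because w_make_ov_request works in units of
 * BM_SECT_PER_BIT sectors. */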
2775 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2776 {
2777         struct drbd_conf *mdev;
2778         enum drbd_ret_code retcode;
2779
2780         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2781         if (!adm_ctx.reply_skb)
2782                 return retcode;
2783         if (retcode != NO_ERROR)
2784                 goto out;
2785
2786         mdev = adm_ctx.mdev;
2787         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2788                 /* resume from last known position, if possible */
2789                 struct start_ov_parms parms =
2790                         { .ov_start_sector = mdev->ov_start_sector };
2791                 int err = start_ov_parms_from_attrs(&parms, info);
2792                 if (err) {
2793                         retcode = ERR_MANDATORY_TAG;
2794                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2795                         goto out;
2796                 }
2797                 /* w_make_ov_request expects position to be aligned */
2798                 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2799         }
2800         /* If there is still bitmap IO pending, e.g. because a previous resync or
2801          * verify has just finished, wait for it before requesting a new resync. */
2802         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2803         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2804 out:
2805         drbd_adm_finish(info, retcode);
2806         return 0;
2807 }
2808
2809
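/* Generate a new current UUID, rotating the old one towards the bitmap and
 * history slots.  With clear_bm set on a device whose current UUID is still
 * UUID_JUST_CREATED, and while connected with an agreed protocol version of
 * at least 90, the initial full sync is skipped and both disks are declared
 * D_UP_TO_DATE.  In userspace this corresponds to something like
 * "drbdsetup new-current-uuid --clear-bitmap"; the exact option name is a
 * drbd-utils detail. */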
2810 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2811 {
2812         struct drbd_conf *mdev;
2813         enum drbd_ret_code retcode;
2814         int skip_initial_sync = 0;
2815         int err;
2816         struct new_c_uuid_parms args;
2817
2818         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2819         if (!adm_ctx.reply_skb)
2820                 return retcode;
2821         if (retcode != NO_ERROR)
2822                 goto out_nolock;
2823
2824         mdev = adm_ctx.mdev;
2825         memset(&args, 0, sizeof(args));
2826         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2827                 err = new_c_uuid_parms_from_attrs(&args, info);
2828                 if (err) {
2829                         retcode = ERR_MANDATORY_TAG;
2830                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2831                         goto out_nolock;
2832                 }
2833         }
2834
2835         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2836
2837         if (!get_ldev(mdev)) {
2838                 retcode = ERR_NO_DISK;
2839                 goto out;
2840         }
2841
2842         /* this is "skip initial sync", assumed to be clean */
2843         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2844             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2845                 dev_info(DEV, "Preparing to skip initial sync\n");
2846                 skip_initial_sync = 1;
2847         } else if (mdev->state.conn != C_STANDALONE) {
2848                 retcode = ERR_CONNECTED;
2849                 goto out_dec;
2850         }
2851
2852         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2853         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2854
2855         if (args.clear_bm) {
2856                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2857                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2858                 if (err) {
2859                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2860                         retcode = ERR_IO_MD_DISK;
2861                 }
2862                 if (skip_initial_sync) {
2863                         drbd_send_uuids_skip_initial_sync(mdev);
2864                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2865                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2866                         spin_lock_irq(&mdev->tconn->req_lock);
2867                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2868                                         CS_VERBOSE, NULL);
2869                         spin_unlock_irq(&mdev->tconn->req_lock);
2870                 }
2871         }
2872
2873         drbd_md_sync(mdev);
2874 out_dec:
2875         put_ldev(mdev);
2876 out:
2877         mutex_unlock(mdev->state_mutex);
2878 out_nolock:
2879         drbd_adm_finish(info, retcode);
2880         return 0;
2881 }
2882
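/* A connection name must be non-empty and must not contain a '/';
 * e.g. "r0" is accepted, while "" and "r0/vol1" are rejected. */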
2883 static enum drbd_ret_code
2884 drbd_check_conn_name(const char *name)
2885 {
2886         if (!name || !name[0]) {
2887                 drbd_msg_put_info("connection name missing");
2888                 return ERR_MANDATORY_TAG;
2889         }
2890         /* if we want to use these in sysfs/configfs/debugfs some day,
2891          * we must not allow slashes */
2892         if (strchr(name, '/')) {
2893                 drbd_msg_put_info("invalid connection name");
2894                 return ERR_INVALID_REQUEST;
2895         }
2896         return NO_ERROR;
2897 }
2898
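/* Create a new connection object for the given name.  An already existing
 * connection is only an error if NLM_F_EXCL was requested. */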
2899 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2900 {
2901         enum drbd_ret_code retcode;
2902
2903         retcode = drbd_adm_prepare(skb, info, 0);
2904         if (!adm_ctx.reply_skb)
2905                 return retcode;
2906         if (retcode != NO_ERROR)
2907                 goto out;
2908
2909         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2910         if (retcode != NO_ERROR)
2911                 goto out;
2912
2913         if (adm_ctx.tconn) {
2914                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2915                         retcode = ERR_INVALID_REQUEST;
2916                         drbd_msg_put_info("connection exists");
2917                 }
2918                 /* else: still NO_ERROR */
2919                 goto out;
2920         }
2921
2922         if (!conn_create(adm_ctx.conn_name))
2923                 retcode = ERR_NOMEM;
2924 out:
2925         drbd_adm_finish(info, retcode);
2926         return 0;
2927 }
2928
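/* Add a new minor (volume) to an existing connection, after range checking
 * the requested minor number and volume id. */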
2929 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2930 {
2931         struct drbd_genlmsghdr *dh = info->userhdr;
2932         enum drbd_ret_code retcode;
2933
2934         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2935         if (!adm_ctx.reply_skb)
2936                 return retcode;
2937         if (retcode != NO_ERROR)
2938                 goto out;
2939
2940         /* FIXME drop minor_count parameter, limit to MINORMASK */
2941         if (dh->minor >= minor_count) {
2942                 drbd_msg_put_info("requested minor out of range");
2943                 retcode = ERR_INVALID_REQUEST;
2944                 goto out;
2945         }
2946         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
2947                 drbd_msg_put_info("requested volume id out of range");
2948                 retcode = ERR_INVALID_REQUEST;
2949                 goto out;
2950         }
2951
2952         /* drbd_adm_prepare made sure already
2953          * that mdev->tconn and mdev->vnr match the request. */
2954         if (adm_ctx.mdev) {
2955                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2956                         retcode = ERR_MINOR_EXISTS;
2957                 /* else: still NO_ERROR */
2958                 goto out;
2959         }
2960
2961         down_write(&drbd_cfg_rwsem);
2962         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2963         up_write(&drbd_cfg_rwsem);
2964 out:
2965         drbd_adm_finish(info, retcode);
2966         return 0;
2967 }
2968
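/* A minor may only be deleted while it is Diskless and Secondary;
 * it does not have to be disconnected. */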
2969 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2970 {
2971         if (mdev->state.disk == D_DISKLESS &&
2972             /* no need to require mdev->state.conn == C_STANDALONE here:
2973              * we may want to delete a minor from a live replication group.
2974              */
2975             mdev->state.role == R_SECONDARY) {
2976                 drbd_delete_device(mdev);
2977                 return NO_ERROR;
2978         } else
2979                 return ERR_MINOR_CONFIGURED;
2980 }
2981
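/* Netlink wrapper around adm_delete_minor(), serialized via drbd_cfg_rwsem. */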
2982 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2983 {
2984         enum drbd_ret_code retcode;
2985
2986         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2987         if (!adm_ctx.reply_skb)
2988                 return retcode;
2989         if (retcode != NO_ERROR)
2990                 goto out;
2991
2992         down_write(&drbd_cfg_rwsem);
2993         retcode = adm_delete_minor(adm_ctx.mdev);
2994         up_write(&drbd_cfg_rwsem);
2995 out:
2996         drbd_adm_finish(info, retcode);
2997         return 0;
2998 }
2999
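/* Take a whole resource down: demote all volumes to Secondary, disconnect,
 * detach all volumes, stop the worker thread, then delete all minors and
 * finally the connection itself. */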
3000 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3001 {
3002         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3003         struct drbd_conf *mdev;
3004         unsigned i;
3005
3006         retcode = drbd_adm_prepare(skb, info, 0);
3007         if (!adm_ctx.reply_skb)
3008                 return retcode;
3009         if (retcode != NO_ERROR)
3010                 goto out;
3011
3012         if (!adm_ctx.tconn) {
3013                 retcode = ERR_CONN_NOT_KNOWN;
3014                 goto out;
3015         }
3016
3017         down_read(&drbd_cfg_rwsem);
3018         /* demote */
3019         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3020                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3021                 if (retcode < SS_SUCCESS) {
3022                         drbd_msg_put_info("failed to demote");
3023                         goto out_unlock;
3024                 }
3025         }
3026         up_read(&drbd_cfg_rwsem);
3027
3028         /* disconnect; may stop the receiver;
3029          * must not hold the drbd_cfg_rwsem */
3030         retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3031         if (retcode < SS_SUCCESS) {
3032                 drbd_msg_put_info("failed to disconnect");
3033                 goto out;
3034         }
3035
3036         down_read(&drbd_cfg_rwsem);
3037         /* detach */
3038         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3039                 retcode = adm_detach(mdev);
3040                 if (retcode < SS_SUCCESS) {
3041                         drbd_msg_put_info("failed to detach");
3042                         goto out_unlock;
3043                 }
3044         }
3045         up_read(&drbd_cfg_rwsem);
3046
3047         /* If we reach this, all volumes (of this tconn) are Secondary,
3048          * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3049          * actually stopped; state handling only does drbd_thread_stop_nowait().
3050          * This needs to be done without holding drbd_cfg_rwsem. */
3051         drbd_thread_stop(&adm_ctx.tconn->worker);
3052
3053         /* Now, nothing can fail anymore */
3054
3055         /* delete volumes */
3056         down_write(&drbd_cfg_rwsem);
3057         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3058                 retcode = adm_delete_minor(mdev);
3059                 if (retcode != NO_ERROR) {
3060                         /* "can not happen" */
3061                         drbd_msg_put_info("failed to delete volume");
3062                         up_write(&drbd_cfg_rwsem);
3063                         goto out;
3064                 }
3065         }
3066
3067         /* delete connection */
3068         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3069                 list_del(&adm_ctx.tconn->all_tconn);
3070                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3071
3072                 retcode = NO_ERROR;
3073         } else {
3074                 /* "can not happen" */
3075                 retcode = ERR_CONN_IN_USE;
3076                 drbd_msg_put_info("failed to delete connection");
3077         }
3078         up_write(&drbd_cfg_rwsem);
3079         goto out;
3080 out_unlock:
3081         up_read(&drbd_cfg_rwsem);
3082 out:
3083         drbd_adm_finish(info, retcode);
3084         return 0;
3085 }
3086
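/* Delete a connection that no longer has any minors; on success, also stop
 * its worker thread. */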
3087 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
3088 {
3089         enum drbd_ret_code retcode;
3090
3091         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3092         if (!adm_ctx.reply_skb)
3093                 return retcode;
3094         if (retcode != NO_ERROR)
3095                 goto out;
3096
3097         down_write(&drbd_cfg_rwsem);
3098         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3099                 list_del(&adm_ctx.tconn->all_tconn);
3100                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3101
3102                 retcode = NO_ERROR;
3103         } else {
3104                 retcode = ERR_CONN_IN_USE;
3105         }
3106         up_write(&drbd_cfg_rwsem);
3107
3108         if (retcode == NO_ERROR)
3109                 drbd_thread_stop(&adm_ctx.tconn->worker);
3110 out:
3111         drbd_adm_finish(info, retcode);
3112         return 0;
3113 }
3114
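/* Broadcast a state info ("sib") event for this minor to the drbd generic
 * netlink events multicast group. */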
3115 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3116 {
3117         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3118         struct sk_buff *msg;
3119         struct drbd_genlmsghdr *d_out;
3120         unsigned seq;
3121         int err = -ENOMEM;
3122
3123         seq = atomic_inc_return(&drbd_genl_seq);
3124         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3125         if (!msg)
3126                 goto failed;
3127
3128         err = -EMSGSIZE;
3129         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3130         if (!d_out) /* cannot happen, but anyway. */
3131                 goto nla_put_failure;
3132         d_out->minor = mdev_to_minor(mdev);
3133         d_out->ret_code = 0;
3134
3135         if (nla_put_status_info(msg, mdev, sib))
3136                 goto nla_put_failure;
3137         genlmsg_end(msg, d_out);
3138         err = drbd_genl_multicast_events(msg, 0);
3139         /* msg has been consumed or freed in netlink_broadcast() */
3140         if (err && err != -ESRCH)
3141                 goto failed;
3142
3143         return;
3144
3145 nla_put_failure:
3146         nlmsg_free(msg);
3147 failed:
3148         dev_err(DEV, "Error %d while broadcasting event. "
3149                         "Event seq:%u sib_reason:%u\n",
3150                         err, seq, sib->sib_reason);
3151 }