scsi_transport_srp: Add transport layer error handling
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"
#include "scsi_transport_srp_internal.h"

struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 6

struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d)	container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
	return dev_to_shost(r->dev.parent);
}
/**
 * srp_tmo_valid() - check timeout combination validity
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these
 * parameters must be such that multipath can detect failed paths in a
 * timely manner. Hence do not allow both parameters to be disabled
 * simultaneously.
 */
int srp_tmo_valid(int fast_io_fail_tmo, int dev_loss_tmo)
{
	if (fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);

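/*
 * Usage sketch (hypothetical LLD code, not part of this file): validate a
 * timeout pair before applying it, e.g. when processing module parameters.
 * The example_* names are illustrative only.
 *
 *	static int example_fast_io_fail_tmo = 15;
 *	static int example_dev_loss_tmo = 60;
 *
 *	static int example_validate_tmo(void)
 *	{
 *		return srp_tmo_valid(example_fast_io_fail_tmo,
 *				     example_dev_loss_tmo);
 *	}
 */
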
static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);

#define SRP_PID(p) \
	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	if (i->f->rport_delete) {
		i->f->rport_delete(rport);
		return count;
	} else {
		return -ENOSYS;
	}
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	static const char *const state_name[] = {
		[SRP_RPORT_RUNNING]	= "running",
		[SRP_RPORT_BLOCKED]	= "blocked",
		[SRP_RPORT_FAIL_FAST]	= "fail-fast",
		[SRP_RPORT_LOST]	= "lost",
	};
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	enum srp_rport_state state = rport->state;

	return sprintf(buf, "%s\n",
		       (unsigned)state < ARRAY_SIZE(state_name) ?
		       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

static int srp_parse_tmo(int *tmo, const char *buf)
{
	int res = 0;

	if (strncmp(buf, "off", 3) != 0)
		res = kstrtoint(buf, 0, tmo);
	else
		*tmo = -1;

	return res;
}

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int fast_io_fail_tmo;

	res = srp_parse_tmo(&fast_io_fail_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(fast_io_fail_tmo, rport->dev_loss_tmo);
	if (res)
		goto out;
	rport->fast_io_fail_tmo = fast_io_fail_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_fast_io_fail_tmo,
		   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int dev_loss_tmo;

	res = srp_parse_tmo(&dev_loss_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->fast_io_fail_tmo, dev_loss_tmo);
	if (res)
		goto out;
	rport->dev_loss_tmo = dev_loss_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_dev_loss_tmo,
		   store_srp_rport_dev_loss_tmo);

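/*
 * The two timeout attributes above accept either a number of seconds or
 * the string "off" (stored internally as -1). A sketch of the resulting
 * sysfs interface, with an illustrative port name:
 *
 *	echo 5   > /sys/class/srp_remote_ports/port-1:1/fast_io_fail_tmo
 *	echo off > /sys/class/srp_remote_ports/port-1:1/dev_loss_tmo
 *
 * A write is rejected with -EINVAL whenever srp_tmo_valid() fails for
 * the resulting combination.
 */
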
static int srp_rport_set_state(struct srp_rport *rport,
			       enum srp_rport_state new_state)
{
	enum srp_rport_state old_state = rport->state;

	lockdep_assert_held(&rport->mutex);

	switch (new_state) {
	case SRP_RPORT_RUNNING:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_BLOCKED:
		switch (old_state) {
		case SRP_RPORT_RUNNING:
			break;
		default:
			goto invalid;
		}
		break;
	case SRP_RPORT_FAIL_FAST:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_LOST:
		break;
	}
	rport->state = new_state;
	return 0;

invalid:
	return -EINVAL;
}

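/*
 * Summary of the transitions that srp_rport_set_state() accepts (rows:
 * old state; columns: requested new state):
 *
 *              RUNNING  BLOCKED  FAIL_FAST  LOST
 *  RUNNING      yes      yes      yes       yes
 *  BLOCKED      yes      no       yes       yes
 *  FAIL_FAST    yes      no       yes       yes
 *  LOST         no       no       no        yes
 */
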
static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, dev_loss_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
	mutex_unlock(&rport->mutex);

	i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	int fast_io_fail_tmo, dev_loss_tmo;

	lockdep_assert_held(&rport->mutex);

	if (!rport->deleted) {
		fast_io_fail_tmo = rport->fast_io_fail_tmo;
		dev_loss_tmo = rport->dev_loss_tmo;
		pr_debug("%s current state: %d\n",
			 dev_name(&shost->shost_gendev), rport->state);

		if (fast_io_fail_tmo >= 0 &&
		    srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
			pr_debug("%s new state: %d\n",
				 dev_name(&shost->shost_gendev),
				 rport->state);
			scsi_target_block(&shost->shost_gendev);
			queue_delayed_work(system_long_wq,
					   &rport->fast_io_fail_work,
					   1UL * fast_io_fail_tmo * HZ);
		}
		if (dev_loss_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->dev_loss_work,
					   1UL * dev_loss_tmo * HZ);
	} else {
		pr_debug("%s has already been deleted\n",
			 dev_name(&shost->shost_gendev));
		srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST);
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);

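/*
 * Usage sketch (hypothetical LLD code, not part of this file): after
 * detecting a transport error, block the rport and arm the
 * fast_io_fail/dev_loss timers.
 *
 *	static void example_handle_transport_error(struct srp_rport *rport)
 *	{
 *		srp_start_tl_fail_timers(rport);
 *	}
 */
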
/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
 */
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	struct request_queue *q;
	int request_fn_active = 0;

	shost_for_each_device(sdev, shost) {
		q = sdev->request_queue;

		spin_lock_irq(q->queue_lock);
		request_fn_active += q->request_fn_active;
		spin_unlock_irq(q->queue_lock);
	}

	return request_fn_active;
}

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct scsi_device *sdev;
	int res;

	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

	res = mutex_lock_interruptible(&rport->mutex);
	if (res)
		goto out;
	scsi_target_block(&shost->shost_gendev);
	while (scsi_request_fn_active(shost))
		msleep(20);
	res = i->f->reconnect(rport);
	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
		 dev_name(&shost->shost_gendev), rport->state, res);
	if (res == 0) {
		cancel_delayed_work(&rport->fast_io_fail_work);
		cancel_delayed_work(&rport->dev_loss_work);

		srp_rport_set_state(rport, SRP_RPORT_RUNNING);
		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
		/*
		 * If the SCSI error handler has offlined one or more devices,
		 * invoking scsi_target_unblock() won't change the state of
		 * these devices into running so do that explicitly.
		 */
		spin_lock_irq(shost->host_lock);
		__shost_for_each_device(sdev, shost)
			if (sdev->sdev_state == SDEV_OFFLINE)
				sdev->sdev_state = SDEV_RUNNING;
		spin_unlock_irq(shost->host_lock);
	} else if (rport->state == SRP_RPORT_RUNNING) {
		/*
		 * srp_reconnect_rport() was invoked with fast_io_fail
		 * off. Mark the port as failed and start the TL failure
		 * timers if these had not yet been started.
		 */
		__rport_fail_io_fast(rport);
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
		__srp_start_tl_fail_timers(rport);
	} else if (rport->state != SRP_RPORT_BLOCKED) {
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
	mutex_unlock(&rport->mutex);

out:
	return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);

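/*
 * Illustrative sketch only: an LLD could drive reconnects from its SCSI
 * host reset handler so that srp_reconnect_rport() runs in SCSI EH
 * context, per the locking strategies listed above. example_host_reset()
 * and example_rport() are hypothetical names, not part of this API.
 *
 *	static int example_host_reset(struct scsi_cmnd *scmd)
 *	{
 *		struct srp_rport *rport = example_rport(scmd->device->host);
 *
 *		return srp_reconnect_rport(rport) == 0 ? SUCCESS : FAILED;
 *	}
 */
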
/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command that timed out.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
	return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	cancel_delayed_work_sync(&rport->fast_io_fail_work);
	cancel_delayed_work_sync(&rport->dev_loss_work);

	put_device(dev->parent);
	kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
	get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
	put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add an SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rport->mutex);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
		*i->f->fast_io_fail_tmo : 15;
	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
	INIT_DELAYED_WORK(&rport->fast_io_fail_work,
			  rport_fast_io_fail_timedout);
	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	if (shost->active_mode & MODE_TARGET &&
	    ids->roles == SRP_RPORT_ROLE_INITIATOR) {
		ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport,
					      rport->port_id);
		if (ret) {
			device_del(&rport->dev);
			transport_destroy_device(&rport->dev);
			put_device(&rport->dev);
			return ERR_PTR(ret);
		}
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);

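/*
 * Illustrative sketch of rport creation by an initiator LLD; target_id is
 * a hypothetical 16-byte identifier supplied by the driver:
 *
 *	struct srp_rport_identifiers ids;
 *	struct srp_rport *rport;
 *
 *	memcpy(ids.port_id, target_id, sizeof(ids.port_id));
 *	ids.roles = SRP_RPORT_ROLE_TARGET;
 *	rport = srp_rport_add(shost, &ids);
 *	if (IS_ERR(rport))
 *		return PTR_ERR(rport);
 */
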
/**
 * srp_rport_del  -  remove an SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);

	if (shost->active_mode & MODE_TARGET &&
	    rport->roles == SRP_RPORT_ROLE_INITIATOR)
		srp_tgt_it_nexus_destroy(shost, (unsigned long)rport);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	rport->deleted = true;
	mutex_unlock(&rport->mutex);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));
	return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

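/*
 * Illustrative teardown order in an LLD's remove path (sketch only),
 * following the "just before scsi_remove_host" rule above:
 *
 *	srp_remove_host(shost);
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */
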
static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
				 int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}

static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->it_nexus_response(shost, nexus, result);
}

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.eh_timed_out = srp_timed_out;

	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
	i->t.it_nexus_response = srp_it_nexus_response;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;

	count = 0;
	i->rport_attrs[count++] = &dev_attr_port_id;
	i->rport_attrs[count++] = &dev_attr_roles;
	if (ft->has_rport_state) {
		i->rport_attrs[count++] = &dev_attr_state;
		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
	}
	if (ft->rport_delete)
		i->rport_attrs[count++] = &dev_attr_delete;
	i->rport_attrs[count++] = NULL;
	BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

	transport_container_register(&i->rport_attr_cont);

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);

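/*
 * Sketch of how an initiator LLD might fill in the function template and
 * attach the transport class; the example_* names and callbacks are
 * hypothetical, and the defaults mirror the fields consumed above:
 *
 *	static int example_fast_io_fail_tmo = 15;
 *	static int example_dev_loss_tmo = 60;
 *
 *	static struct srp_function_template example_ft = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.fast_io_fail_tmo	= &example_fast_io_fail_tmo,
 *		.dev_loss_tmo		= &example_dev_loss_tmo,
 *		.reconnect		= example_rport_reconnect,
 *		.terminate_rport_io	= example_terminate_io,
 *		.rport_delete		= example_rport_delete,
 *	};
 *
 *	shost->transportt = srp_attach_transport(&example_ft);
 */
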
/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}

static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);