/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to lowlevel SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static int scsi_host_next_hn;           /* host_no for next new host */


static void scsi_host_cls_release(struct class_device *class_dev)
{
        put_device(&class_to_shost(class_dev)->shost_gendev);
}

static struct class shost_class = {
        .name           = "scsi_host",
        .release        = scsi_host_cls_release,
};

/**
 *      scsi_host_set_state - Take the given host through the host
 *              state model.
 *      @shost: scsi host to change the state of.
 *      @state: state to change to.
 *
 *      Returns zero if the transition was successful, or an error code
 *      if the requested transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
        enum scsi_host_state oldstate = shost->shost_state;

        if (state == oldstate)
                return 0;

        switch (state) {
        case SHOST_CREATED:
                /* There are no legal states that come back to
                 * created.  This is the manually initialised start
                 * state */
                goto illegal;

        case SHOST_RUNNING:
                switch (oldstate) {
                case SHOST_CREATED:
                case SHOST_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_RECOVERY:
                switch (oldstate) {
                case SHOST_RUNNING:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_CANCEL:
                switch (oldstate) {
                case SHOST_CREATED:
                case SHOST_RUNNING:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_DEL:
                switch (oldstate) {
                case SHOST_CANCEL:
                        break;
                default:
                        goto illegal;
                }
                break;

        }
        shost->shost_state = state;
        return 0;

 illegal:
        SCSI_LOG_ERROR_RECOVERY(1,
                                dev_printk(KERN_ERR, &shost->shost_gendev,
                                           "Illegal host state transition "
                                           "%s->%s\n",
                                           scsi_host_state_name(oldstate),
                                           scsi_host_state_name(state)));
        return -EINVAL;
}
EXPORT_SYMBOL(scsi_host_set_state);
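
/*
 * Summary of the legal transitions accepted above (derived directly from
 * the switch statement; a no-op transition to the current state always
 * succeeds):
 *
 *      SHOST_CREATED  -> SHOST_RUNNING or SHOST_CANCEL
 *      SHOST_RUNNING  -> SHOST_RECOVERY or SHOST_CANCEL
 *      SHOST_RECOVERY -> SHOST_RUNNING
 *      SHOST_CANCEL   -> SHOST_DEL
 *
 * Nothing may transition back to SHOST_CREATED; it is only reachable as
 * the initial state set in scsi_host_alloc().
 */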

/**
 * scsi_host_cancel - cancel outstanding IO to this host
 * @shost:      pointer to struct Scsi_Host
 * @recovery:   recovery requested to run.
 **/
static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
{
        struct scsi_device *sdev;

        scsi_host_set_state(shost, SHOST_CANCEL);
        shost_for_each_device(sdev, shost) {
                scsi_device_cancel(sdev, recovery);
        }
        wait_event(shost->host_wait, (shost->shost_state != SHOST_RECOVERY));
}

/**
 * scsi_remove_host - remove a scsi host
 * @shost:      a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
        scsi_forget_host(shost);
        scsi_host_cancel(shost, 0);
        scsi_proc_host_rm(shost);

        scsi_host_set_state(shost, SHOST_DEL);

        transport_unregister_device(&shost->shost_gendev);
        class_device_unregister(&shost->shost_classdev);
        device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
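
/*
 * Illustrative sketch (not part of this file): a low-level driver's
 * remove/detach path would normally pair scsi_remove_host() with a final
 * scsi_host_put() to drop the reference obtained at allocation time.
 * "struct example_adapter" and example_hw_shutdown() are hypothetical:
 *
 *      static void example_remove(struct example_adapter *adapter)
 *      {
 *              scsi_remove_host(adapter->shost);
 *              example_hw_shutdown(adapter);   -- hypothetical HW teardown
 *              scsi_host_put(adapter->shost);
 *      }
 */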

/**
 * scsi_add_host - add a scsi host
 * @shost:      scsi host pointer to add
 * @dev:        a struct device of type scsi class
 *
 * Return value:
 *      0 on success / != 0 for error
 **/
int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
{
        struct scsi_host_template *sht = shost->hostt;
        int error = -EINVAL;

        printk(KERN_INFO "scsi%d : %s\n", shost->host_no,
                        sht->info ? sht->info(shost) : sht->name);

        if (!shost->can_queue) {
                printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
                                sht->name);
                goto out;
        }

        if (!shost->shost_gendev.parent)
                shost->shost_gendev.parent = dev ? dev : &platform_bus;

        error = device_add(&shost->shost_gendev);
        if (error)
                goto out;

        scsi_host_set_state(shost, SHOST_RUNNING);
        get_device(shost->shost_gendev.parent);

        error = class_device_add(&shost->shost_classdev);
        if (error)
                goto out_del_gendev;

        get_device(&shost->shost_gendev);

        if (shost->transportt->host_size &&
            (shost->shost_data = kmalloc(shost->transportt->host_size,
                                         GFP_KERNEL)) == NULL) {
                error = -ENOMEM;
                goto out_del_classdev;
        }

        if (shost->transportt->create_work_queue) {
                snprintf(shost->work_q_name, KOBJ_NAME_LEN, "scsi_wq_%d",
                        shost->host_no);
                shost->work_q = create_singlethread_workqueue(
                                        shost->work_q_name);
                if (!shost->work_q) {
                        error = -ENOMEM;
                        goto out_free_shost_data;
                }
        }

        error = scsi_sysfs_add_host(shost);
        if (error)
                goto out_destroy_host;

        scsi_proc_host_add(shost);
        return error;

 out_destroy_host:
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
 out_free_shost_data:
        kfree(shost->shost_data);
 out_del_classdev:
        class_device_del(&shost->shost_classdev);
 out_del_gendev:
        device_del(&shost->shost_gendev);
 out:
        return error;
}
EXPORT_SYMBOL(scsi_add_host);
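
/*
 * Illustrative sketch (not part of this file): the usual probe-time
 * sequence in a low-level driver.  "example_template", "example_priv"
 * and "pdev" are hypothetical names:
 *
 *      shost = scsi_host_alloc(&example_template, sizeof(struct example_priv));
 *      if (!shost)
 *              return -ENOMEM;
 *
 *      // set shost->max_id, shost->max_lun, shost->irq, ... as needed
 *
 *      error = scsi_add_host(shost, &pdev->dev);
 *      if (error) {
 *              scsi_host_put(shost);
 *              return error;
 *      }
 *      scsi_scan_host(shost);
 */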

static void scsi_host_dev_release(struct device *dev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;

        if (shost->ehandler) {
                DECLARE_COMPLETION(sem);
                shost->eh_notify = &sem;
                shost->eh_kill = 1;
                up(shost->eh_wait);
                wait_for_completion(&sem);
                shost->eh_notify = NULL;
        }

        if (shost->work_q)
                destroy_workqueue(shost->work_q);

        scsi_proc_hostdir_rm(shost->hostt);
        scsi_destroy_command_freelist(shost);
        kfree(shost->shost_data);

        if (parent)
                put_device(parent);
        kfree(shost);
}

/**
 * scsi_host_alloc - allocate and initialize a scsi host adapter instance
 * @sht:        pointer to scsi host template
 * @privsize:   extra bytes to allocate for driver-private data
 *
 * Note:
 *      Allocate a new Scsi_Host and perform basic initialization.
 *      The host is not published to the scsi midlayer until scsi_add_host
 *      is called.
 *
 * Return value:
 *      Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
        struct Scsi_Host *shost;
        int gfp_mask = GFP_KERNEL, rval;
        DECLARE_COMPLETION(complete);

        if (sht->unchecked_isa_dma && privsize)
                gfp_mask |= __GFP_DMA;

        /* Check to see if this host has any error handling facilities */
        if (!sht->eh_strategy_handler && !sht->eh_abort_handler &&
            !sht->eh_device_reset_handler && !sht->eh_bus_reset_handler &&
            !sht->eh_host_reset_handler) {
                printk(KERN_ERR "ERROR: SCSI host `%s' has no error handling\n"
                                "ERROR: This is not a safe way to run your "
                                        "SCSI host\n"
                                "ERROR: The error handling must be added to "
                                "this driver\n", sht->proc_name);
                dump_stack();
        }

        shost = kmalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
        if (!shost)
                return NULL;
        memset(shost, 0, sizeof(struct Scsi_Host) + privsize);

        spin_lock_init(&shost->default_lock);
        scsi_assign_lock(shost, &shost->default_lock);
        shost->shost_state = SHOST_CREATED;
        INIT_LIST_HEAD(&shost->__devices);
        INIT_LIST_HEAD(&shost->__targets);
        INIT_LIST_HEAD(&shost->eh_cmd_q);
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);

        init_MUTEX(&shost->scan_mutex);

        shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */
        shost->dma_channel = 0xff;

        /* These three are default values which can be overridden */
        shost->max_channel = 0;
        shost->max_id = 8;
        shost->max_lun = 8;

        /* Give each shost a default transportt */
        shost->transportt = &blank_transport_template;

        /*
         * All drivers right now should be able to handle 12 byte
         * commands.  Every so often there are requests for 16 byte
         * commands, but individual low-level drivers need to certify that
         * they actually do something sensible with such commands.
         */
        shost->max_cmd_len = 12;
        shost->hostt = sht;
        shost->this_id = sht->this_id;
        shost->can_queue = sht->can_queue;
        shost->sg_tablesize = sht->sg_tablesize;
        shost->cmd_per_lun = sht->cmd_per_lun;
        shost->unchecked_isa_dma = sht->unchecked_isa_dma;
        shost->use_clustering = sht->use_clustering;
        shost->ordered_flush = sht->ordered_flush;
        shost->ordered_tag = sht->ordered_tag;

        /*
         * Hosts that do command queueing must use ordered tags for
         * barriers; ordered flushes only work for non-queueing hosts.
         */
        if (shost->can_queue > 1 && shost->ordered_flush) {
                printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
                shost->ordered_flush = 0;
        }

        if (sht->max_host_blocked)
                shost->max_host_blocked = sht->max_host_blocked;
        else
                shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

        /*
         * If the driver imposes no hard sector transfer limit, start at
         * machine infinity initially.
         */
        if (sht->max_sectors)
                shost->max_sectors = sht->max_sectors;
        else
                shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

        /*
         * assume a 4GB boundary, if not set
         */
        if (sht->dma_boundary)
                shost->dma_boundary = sht->dma_boundary;
        else
                shost->dma_boundary = 0xffffffff;

        rval = scsi_setup_command_freelist(shost);
        if (rval)
                goto fail_kfree;

        device_initialize(&shost->shost_gendev);
        snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
                shost->host_no);
        shost->shost_gendev.release = scsi_host_dev_release;

        class_device_initialize(&shost->shost_classdev);
        shost->shost_classdev.dev = &shost->shost_gendev;
        shost->shost_classdev.class = &shost_class;
        snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
                  shost->host_no);

        shost->eh_notify = &complete;
        rval = kernel_thread(scsi_error_handler, shost, 0);
        if (rval < 0)
                goto fail_destroy_freelist;
        wait_for_completion(&complete);
        shost->eh_notify = NULL;

        scsi_proc_hostdir_add(shost->hostt);
        return shost;

 fail_destroy_freelist:
        scsi_destroy_command_freelist(shost);
 fail_kfree:
        kfree(shost);
        return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
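
/*
 * Illustrative sketch (not part of this file): the @privsize bytes
 * requested from scsi_host_alloc() live in shost->hostdata[] and are
 * conventionally used for the driver's per-adapter state.  "struct
 * example_priv" and "example_template" are hypothetical:
 *
 *      shost = scsi_host_alloc(&example_template, sizeof(struct example_priv));
 *      if (shost) {
 *              struct example_priv *priv =
 *                              (struct example_priv *)shost->hostdata;
 *              priv->shost = shost;
 *      }
 */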

struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
{
        struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);

        if (!sht->detect) {
                printk(KERN_WARNING "scsi_register() called on new-style "
                                    "template for driver %s\n", sht->name);
                dump_stack();
        }

        if (shost)
                list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
        return shost;
}
EXPORT_SYMBOL(scsi_register);

void scsi_unregister(struct Scsi_Host *shost)
{
        list_del(&shost->sht_legacy_list);
        scsi_host_put(shost);
}
EXPORT_SYMBOL(scsi_unregister);

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 *
 * @hostnum:    host number to locate
 *
 * Return value:
 *      A pointer to the located Scsi_Host with its reference count
 *      incremented, ERR_PTR(-ENXIO) if no such host exists, or NULL if
 *      the host was found but is already being deleted.
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
        struct class *class = &shost_class;
        struct class_device *cdev;
        struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p;

        down_read(&class->subsys.rwsem);
        list_for_each_entry(cdev, &class->children, node) {
                p = class_to_shost(cdev);
                if (p->host_no == hostnum) {
                        shost = scsi_host_get(p);
                        break;
                }
        }
        up_read(&class->subsys.rwsem);

        return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
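
/*
 * Illustrative sketch (not part of this file): callers must check the
 * result with IS_ERR() and, because scsi_host_get() can return NULL, for
 * NULL as well; a successful lookup is balanced with scsi_host_put():
 *
 *      shost = scsi_host_lookup(hostnum);
 *      if (IS_ERR(shost) || !shost)
 *              return -ENXIO;
 *      // ... use shost ...
 *      scsi_host_put(shost);
 */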

/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
        if ((shost->shost_state == SHOST_DEL) ||
                !get_device(&shost->shost_gendev))
                return NULL;
        return shost;
}
EXPORT_SYMBOL(scsi_host_get);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
        put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);

int scsi_init_hosts(void)
{
        return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
        class_unregister(&shost_class);
}

int scsi_is_host_device(const struct device *dev)
{
        return dev->release == scsi_host_dev_release;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:      Pointer to Scsi_Host.
 * @work:       Work to queue for execution.
 *
 * Return value:
 *      the return value of queue_work(), or -EINVAL if the host has no
 *      workqueue.
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
        if (unlikely(!shost->work_q)) {
                printk(KERN_ERR
                        "ERROR: Scsi host '%s' attempted to queue scsi-work "
                        "when no workqueue was created.\n", shost->hostt->name);
                dump_stack();

                return -EINVAL;
        }

        return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
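
/*
 * Illustrative sketch (not part of this file): a transport class that sets
 * create_work_queue in its template can defer work to the per-host queue.
 * "example_work" and "example_work_fn" are hypothetical, and INIT_WORK()
 * is assumed to follow the workqueue API of this kernel generation
 * (handler plus data pointer):
 *
 *      INIT_WORK(&example_work, example_work_fn, shost);
 *      if (scsi_queue_work(shost, &example_work) < 0)
 *              printk(KERN_WARNING "example: scsi-work not queued\n");
 *
 * and later, to wait for anything still pending:
 *
 *      scsi_flush_work(shost);
 */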

/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:      Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
        if (!shost->work_q) {
                printk(KERN_ERR
                        "ERROR: Scsi host '%s' attempted to flush scsi-work "
                        "when no workqueue was created.\n", shost->hostt->name);
                dump_stack();
                return;
        }

        flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);