/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when it has copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
69 #include <linux/module.h>
70 #include <linux/moduleparam.h>
71 #include <linux/dma-mapping.h>
72 #include <linux/delay.h>
74 #include <scsi/scsi.h>
75 #include <scsi/scsi_cmnd.h>
76 #include <scsi/scsi_host.h>
77 #include <scsi/scsi_device.h>
78 #include <scsi/scsi_transport_srp.h>
/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
86 static int max_id = 64;
87 static int max_channel = 3;
88 static int init_timeout = 5;
89 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
91 static struct scsi_transport_template *ibmvscsi_transport_template;
93 #define IBMVSCSI_VERSION "1.5.8"
95 MODULE_DESCRIPTION("IBM Virtual SCSI");
96 MODULE_AUTHOR("Dave Boutcher");
97 MODULE_LICENSE("GPL");
98 MODULE_VERSION(IBMVSCSI_VERSION);
100 module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
102 module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
103 MODULE_PARM_DESC(max_channel, "Largest channel value");
104 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
105 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
106 module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
109 /* ------------------------------------------------------------
110 * Routines for the event pool and event structs
113 * initialize_event_pool: - Allocates and initializes the event pool for a host
114 * @pool: event_pool to be initialized
115 * @size: Number of events in pool
116 * @hostdata: ibmvscsi_host_data who owns the event pool
118 * Returns zero on success.
120 static int initialize_event_pool(struct event_pool *pool,
121 int size, struct ibmvscsi_host_data *hostdata)
127 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
132 dma_alloc_coherent(hostdata->dev,
133 pool->size * sizeof(*pool->iu_storage),
135 if (!pool->iu_storage) {
140 for (i = 0; i < pool->size; ++i) {
141 struct srp_event_struct *evt = &pool->events[i];
142 memset(&evt->crq, 0x00, sizeof(evt->crq));
143 atomic_set(&evt->free, 1);
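/* Pre-build the CRQ entry for this event: 0x80 marks a valid
 * command element, and IU_length/IU_data_ptr tell the server how
 * big the SRP IU is and where to DMA it from. */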
144 evt->crq.valid = 0x80;
145 evt->crq.IU_length = sizeof(*evt->xfer_iu);
146 evt->crq.IU_data_ptr = pool->iu_token +
147 sizeof(*evt->xfer_iu) * i;
148 evt->xfer_iu = pool->iu_storage + i;
149 evt->hostdata = hostdata;
150 evt->ext_list = NULL;
151 evt->ext_list_token = 0;
158 * release_event_pool: - Frees memory of an event pool of a host
159 * @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
 */
164 static void release_event_pool(struct event_pool *pool,
165 struct ibmvscsi_host_data *hostdata)
168 for (i = 0; i < pool->size; ++i) {
169 if (atomic_read(&pool->events[i].free) != 1)
171 if (pool->events[i].ext_list) {
172 dma_free_coherent(hostdata->dev,
173 SG_ALL * sizeof(struct srp_direct_buf),
174 pool->events[i].ext_list,
175 pool->events[i].ext_list_token);
179 dev_warn(hostdata->dev, "releasing event pool with %d "
180 "events still in use?\n", in_use);
182 dma_free_coherent(hostdata->dev,
183 pool->size * sizeof(*pool->iu_storage),
184 pool->iu_storage, pool->iu_token);
188 * valid_event_struct: - Determines if event is valid.
189 * @pool: event_pool that contains the event
190 * @evt: srp_event_struct to be checked for validity
192 * Returns zero if event is invalid, one otherwise.
194 static int valid_event_struct(struct event_pool *pool,
195 struct srp_event_struct *evt)
197 int index = evt - pool->events;
198 if (index < 0 || index >= pool->size) /* outside of bounds */
200 if (evt != pool->events + index) /* unaligned */
 * free_event_struct: - Changes status of event to "free"
207 * @pool: event_pool that contains the event
208 * @evt: srp_event_struct to be modified
211 static void free_event_struct(struct event_pool *pool,
212 struct srp_event_struct *evt)
214 if (!valid_event_struct(pool, evt)) {
215 dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
216 "(not in pool %p)\n", evt, pool->events);
219 if (atomic_inc_return(&evt->free) != 1) {
220 dev_err(evt->hostdata->dev, "Freeing event_struct %p "
221 "which is not in use!\n", evt);
227 * get_evt_struct: - Gets the next free event in pool
228 * @pool: event_pool that contains the events to be searched
230 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here; we assume the host_lock
 * will synchronize things.
 */
234 static struct srp_event_struct *get_event_struct(struct event_pool *pool)
237 int poolsize = pool->size;
238 int offset = pool->next;
240 for (i = 0; i < poolsize; i++) {
241 offset = (offset + 1) % poolsize;
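/* atomic_dec_if_positive() takes "free" from 1 to 0 only if the
 * event is actually free, so a result of 0 means we claimed it. */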
242 if (!atomic_dec_if_positive(&pool->events[offset].free)) {
244 return &pool->events[offset];
248 printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct: The event
256 * @done: Routine to call when the event is responded to
257 * @format: SRP or MAD format
258 * @timeout: timeout value set in the CRQ
260 static void init_event_struct(struct srp_event_struct *evt_struct,
261 void (*done) (struct srp_event_struct *),
265 evt_struct->cmnd = NULL;
266 evt_struct->cmnd_done = NULL;
267 evt_struct->sync_srp = NULL;
268 evt_struct->crq.format = format;
269 evt_struct->crq.timeout = timeout;
270 evt_struct->done = done;
273 /* ------------------------------------------------------------
274 * Routines for receiving SCSI responses from the hosting partition
278 * set_srp_direction: Set the fields in the srp related to data
279 * direction and number of buffers based on the direction in
280 * the scsi_cmnd and the number of buffers
282 static void set_srp_direction(struct scsi_cmnd *cmd,
283 struct srp_cmd *srp_cmd,
292 fmt = SRP_DATA_DESC_DIRECT;
294 fmt = SRP_DATA_DESC_INDIRECT;
295 numbuf = min(numbuf, MAX_INDIRECT_BUFS);
297 if (cmd->sc_data_direction == DMA_TO_DEVICE)
298 srp_cmd->data_out_desc_cnt = numbuf;
300 srp_cmd->data_in_desc_cnt = numbuf;
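/* The data-out buffer format lives in the upper nibble of buf_fmt,
 * the data-in format in the lower nibble. */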
303 if (cmd->sc_data_direction == DMA_TO_DEVICE)
304 srp_cmd->buf_fmt = fmt << 4;
306 srp_cmd->buf_fmt = fmt;
309 static void unmap_sg_list(int num_entries,
311 struct srp_direct_buf *md)
315 for (i = 0; i < num_entries; ++i)
316 dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
 * unmap_cmd_data: - Unmap data pointed to in srp_cmd based on the format
321 * @cmd: srp_cmd whose additional_data member will be unmapped
322 * @dev: device for which the memory is mapped
325 static void unmap_cmd_data(struct srp_cmd *cmd,
326 struct srp_event_struct *evt_struct,
331 out_fmt = cmd->buf_fmt >> 4;
332 in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
334 if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
336 else if (out_fmt == SRP_DATA_DESC_DIRECT ||
337 in_fmt == SRP_DATA_DESC_DIRECT) {
338 struct srp_direct_buf *data =
339 (struct srp_direct_buf *) cmd->add_data;
340 dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
342 struct srp_indirect_buf *indirect =
343 (struct srp_indirect_buf *) cmd->add_data;
344 int num_mapped = indirect->table_desc.len /
345 sizeof(struct srp_direct_buf);
347 if (num_mapped <= MAX_INDIRECT_BUFS) {
348 unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
352 unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
356 static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
357 struct srp_direct_buf *md)
360 struct scatterlist *sg;
361 u64 total_length = 0;
363 scsi_for_each_sg(cmd, sg, nseg, i) {
364 struct srp_direct_buf *descr = md + i;
365 descr->va = sg_dma_address(sg);
366 descr->len = sg_dma_len(sg);
368 total_length += sg_dma_len(sg);
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
375 * @cmd: Scsi_Cmnd with the scatterlist
376 * @srp_cmd: srp_cmd that contains the memory descriptor
377 * @dev: device for which to map dma memory
379 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
380 * Returns 1 on success.
382 static int map_sg_data(struct scsi_cmnd *cmd,
383 struct srp_event_struct *evt_struct,
384 struct srp_cmd *srp_cmd, struct device *dev)
388 u64 total_length = 0;
389 struct srp_direct_buf *data =
390 (struct srp_direct_buf *) srp_cmd->add_data;
391 struct srp_indirect_buf *indirect =
392 (struct srp_indirect_buf *) data;
394 sg_mapped = scsi_dma_map(cmd);
397 else if (sg_mapped < 0)
400 set_srp_direction(cmd, srp_cmd, sg_mapped);
402 /* special case; we can use a single direct descriptor */
403 if (sg_mapped == 1) {
404 map_sg_list(cmd, sg_mapped, data);
408 indirect->table_desc.va = 0;
409 indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
410 indirect->table_desc.key = 0;
412 if (sg_mapped <= MAX_INDIRECT_BUFS) {
413 total_length = map_sg_list(cmd, sg_mapped,
414 &indirect->desc_list[0]);
415 indirect->len = total_length;
419 /* get indirect table */
420 if (!evt_struct->ext_list) {
421 evt_struct->ext_list = (struct srp_direct_buf *)
422 dma_alloc_coherent(dev,
423 SG_ALL * sizeof(struct srp_direct_buf),
424 &evt_struct->ext_list_token, 0);
425 if (!evt_struct->ext_list) {
426 sdev_printk(KERN_ERR, cmd->device,
427 "Can't allocate memory for indirect table\n");
432 total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
434 indirect->len = total_length;
435 indirect->table_desc.va = evt_struct->ext_list_token;
436 indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
437 memcpy(indirect->desc_list, evt_struct->ext_list,
438 MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
443 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
444 * @cmd: struct scsi_cmnd with the memory to be mapped
445 * @srp_cmd: srp_cmd that contains the memory descriptor
446 * @dev: dma device for which to map dma memory
448 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
449 * Returns 1 on success.
451 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
452 struct srp_event_struct *evt_struct,
453 struct srp_cmd *srp_cmd, struct device *dev)
455 switch (cmd->sc_data_direction) {
456 case DMA_FROM_DEVICE:
461 case DMA_BIDIRECTIONAL:
462 sdev_printk(KERN_ERR, cmd->device,
463 "Can't map DMA_BIDIRECTIONAL to read/write\n");
466 sdev_printk(KERN_ERR, cmd->device,
467 "Unknown data direction 0x%02x; can't map!\n",
468 cmd->sc_data_direction);
472 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
477 * @hostdata: the adapter
479 static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
481 struct srp_event_struct *tmp_evt, *pos;
484 spin_lock_irqsave(hostdata->host->host_lock, flags);
485 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
486 list_del(&tmp_evt->list);
487 del_timer(&tmp_evt->timer);
489 tmp_evt->cmnd->result = (error_code << 16);
490 unmap_cmd_data(&tmp_evt->iu.srp.cmd,
492 tmp_evt->hostdata->dev);
493 if (tmp_evt->cmnd_done)
494 tmp_evt->cmnd_done(tmp_evt->cmnd);
495 } else if (tmp_evt->done)
496 tmp_evt->done(tmp_evt);
497 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
499 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
503 * ibmvscsi_reset_host - Reset the connection to the server
504 * @hostdata: struct ibmvscsi_host_data to reset
506 static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
508 scsi_block_requests(hostdata->host);
509 atomic_set(&hostdata->request_limit, 0);
511 purge_requests(hostdata, DID_ERROR);
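/* 0xC001... is the CRQ "initialization" message; the partner is
 * expected to answer with an 0xC002... initialization response
 * (see ibmvscsi_handle_crq()). */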
512 if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
513 (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
514 (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
515 atomic_set(&hostdata->request_limit, -1);
516 dev_err(hostdata->dev, "error after reset\n");
519 scsi_unblock_requests(hostdata->host);
523 * ibmvscsi_timeout - Internal command timeout handler
524 * @evt_struct: struct srp_event_struct that timed out
526 * Called when an internally generated command times out
528 static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
530 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
532 dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
533 evt_struct->iu.srp.cmd.opcode);
535 ibmvscsi_reset_host(hostdata);
539 /* ------------------------------------------------------------
540 * Routines for sending and receiving SRPs
543 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
544 * @evt_struct: evt_struct to be sent
545 * @hostdata: ibmvscsi_host_data of host
546 * @timeout: timeout in seconds - 0 means do not time command
548 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
549 * Note that this routine assumes that host_lock is held for synchronization
551 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
552 struct ibmvscsi_host_data *hostdata,
553 unsigned long timeout)
555 u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
559 /* If we have exhausted our request limit, just fail this request,
560 * unless it is for a reset or abort.
561 * Note that there are rare cases involving driver generated requests
562 * (such as task management requests) that the mid layer may think we
563 * can handle more requests (can_queue) when we actually can't
565 if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
567 atomic_dec_if_positive(&hostdata->request_limit);
568 /* If request limit was -1 when we started, it is now even
571 if (request_status < -1)
573 /* Otherwise, we may have run out of requests. */
574 /* Abort and reset calls should make it through.
575 * Nothing except abort and reset should use the last two
576 * slots unless we had two or less to begin with.
578 else if (request_status < 2 &&
579 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
580 /* In the case that we have less than two requests
581 * available, check the server limit as a combination
582 * of the request limit and the number of requests
583 * in-flight (the size of the send list). If the
584 * server limit is greater than 2, return busy so
585 * that the last two are reserved for reset and abort.
587 int server_limit = request_status;
588 struct srp_event_struct *tmp_evt;
590 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
594 if (server_limit > 2)
599 /* Copy the IU into the transfer area */
600 *evt_struct->xfer_iu = evt_struct->iu;
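/* Use the event pointer itself as the SRP tag, so the response
 * handler can correlate the reply back to this event. */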
601 evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
603 /* Add this to the sent list. We need to do this
604 * before we actually send
605 * in case it comes back REALLY fast
607 list_add_tail(&evt_struct->list, &hostdata->sent);
609 init_timer(&evt_struct->timer);
611 evt_struct->timer.data = (unsigned long) evt_struct;
612 evt_struct->timer.expires = jiffies + (timeout * HZ);
613 evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
614 add_timer(&evt_struct->timer);
618 ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
619 list_del(&evt_struct->list);
620 del_timer(&evt_struct->timer);
622 dev_err(hostdata->dev, "send error %d\n", rc);
623 atomic_inc(&hostdata->request_limit);
630 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
632 free_event_struct(&hostdata->pool, evt_struct);
633 atomic_inc(&hostdata->request_limit);
634 return SCSI_MLQUEUE_HOST_BUSY;
637 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
639 if (evt_struct->cmnd != NULL) {
640 evt_struct->cmnd->result = DID_ERROR << 16;
641 evt_struct->cmnd_done(evt_struct->cmnd);
642 } else if (evt_struct->done)
643 evt_struct->done(evt_struct);
645 free_event_struct(&hostdata->pool, evt_struct);
650 * handle_cmd_rsp: - Handle responses from commands
651 * @evt_struct: srp_event_struct to be handled
 * Used as a callback when sending scsi cmds.
654 * Gets called by ibmvscsi_handle_crq()
656 static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
658 struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
659 struct scsi_cmnd *cmnd = evt_struct->cmnd;
661 if (unlikely(rsp->opcode != SRP_RSP)) {
662 if (printk_ratelimit())
663 dev_warn(evt_struct->hostdata->dev,
664 "bad SRP RSP type %d\n", rsp->opcode);
668 cmnd->result = rsp->status;
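/* On CHECK CONDITION the server returns autosense data in the
 * response; copy it into the midlayer's sense buffer. */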
669 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
670 memcpy(cmnd->sense_buffer,
672 rsp->sense_data_len);
673 unmap_cmd_data(&evt_struct->iu.srp.cmd,
675 evt_struct->hostdata->dev);
677 if (rsp->flags & SRP_RSP_FLAG_DOOVER)
678 scsi_set_resid(cmnd, rsp->data_out_res_cnt);
679 else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
680 scsi_set_resid(cmnd, rsp->data_in_res_cnt);
683 if (evt_struct->cmnd_done)
684 evt_struct->cmnd_done(cmnd);
688 * lun_from_dev: - Returns the lun of the scsi device
689 * @dev: struct scsi_device
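 *
 * Builds a logical-unit-address style LUN: addressing method 0x2 in the
 * top two bits, then 6 bits of target id, 3 bits of channel (bus), and
 * 5 bits of lun, matching the limits described at the top of this file.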
692 static inline u16 lun_from_dev(struct scsi_device *dev)
694 return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
698 * ibmvscsi_queue: - The queuecommand function of the scsi template
699 * @cmd: struct scsi_cmnd to be executed
700 * @done: Callback function to be called when cmd is completed
702 static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
703 void (*done) (struct scsi_cmnd *))
705 struct srp_cmd *srp_cmd;
706 struct srp_event_struct *evt_struct;
707 struct srp_indirect_buf *indirect;
708 struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
709 u16 lun = lun_from_dev(cmnd->device);
712 evt_struct = get_event_struct(&hostdata->pool);
714 return SCSI_MLQUEUE_HOST_BUSY;
716 /* Set up the actual SRP IU */
717 srp_cmd = &evt_struct->iu.srp.cmd;
718 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
719 srp_cmd->opcode = SRP_CMD;
720 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
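/* The 16-bit LUN occupies the two most significant bytes of the
 * 8-byte SRP LUN field. */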
721 srp_cmd->lun = ((u64) lun) << 48;
723 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
724 sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
725 free_event_struct(&hostdata->pool, evt_struct);
726 return SCSI_MLQUEUE_HOST_BUSY;
729 init_event_struct(evt_struct,
732 cmnd->timeout_per_command/HZ);
734 evt_struct->cmnd = cmnd;
735 evt_struct->cmnd_done = done;
737 /* Fix up dma address of the buffer itself */
738 indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
739 out_fmt = srp_cmd->buf_fmt >> 4;
740 in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
741 if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
742 out_fmt == SRP_DATA_DESC_INDIRECT) &&
743 indirect->table_desc.va == 0) {
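/* The descriptor table is embedded in the IU itself, so its bus
 * address is the IU's DMA address plus the offset of the
 * descriptor list within the srp_cmd. */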
744 indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
745 offsetof(struct srp_cmd, add_data) +
746 offsetof(struct srp_indirect_buf, desc_list);
749 return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
752 /* ------------------------------------------------------------
753 * Routines for driver initialization
756 * adapter_info_rsp: - Handle response to MAD adapter info request
757 * @evt_struct: srp_event_struct with the response
 * Used as a "done" callback when sending adapter_info.  Gets called
760 * by ibmvscsi_handle_crq()
762 static void adapter_info_rsp(struct srp_event_struct *evt_struct)
764 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
765 dma_unmap_single(hostdata->dev,
766 evt_struct->iu.mad.adapter_info.buffer,
767 evt_struct->iu.mad.adapter_info.common.length,
770 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
771 dev_err(hostdata->dev, "error %d getting adapter info\n",
772 evt_struct->xfer_iu->mad.adapter_info.common.status);
774 dev_info(hostdata->dev, "host srp version: %s, "
775 "host partition %s (%d), OS %d, max io %u\n",
776 hostdata->madapter_info.srp_version,
777 hostdata->madapter_info.partition_name,
778 hostdata->madapter_info.partition_number,
779 hostdata->madapter_info.os_type,
780 hostdata->madapter_info.port_max_txu[0]);
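/* port_max_txu is in bytes; max_sectors is in 512-byte sectors,
 * hence the shift by 9 below. */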
782 if (hostdata->madapter_info.port_max_txu[0])
783 hostdata->host->max_sectors =
784 hostdata->madapter_info.port_max_txu[0] >> 9;
786 if (hostdata->madapter_info.os_type == 3 &&
787 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
788 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
789 hostdata->madapter_info.srp_version);
790 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
792 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
798 * send_mad_adapter_info: - Sends the mad adapter info request
799 * and stores the result so it can be retrieved with
800 * sysfs. We COULD consider causing a failure if the
801 * returned SRP version doesn't match ours.
802 * @hostdata: ibmvscsi_host_data of host
804 * Returns zero if successful.
806 static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
808 struct viosrp_adapter_info *req;
809 struct srp_event_struct *evt_struct;
813 evt_struct = get_event_struct(&hostdata->pool);
815 dev_err(hostdata->dev,
816 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
820 init_event_struct(evt_struct,
825 req = &evt_struct->iu.mad.adapter_info;
826 memset(req, 0x00, sizeof(*req));
828 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
829 req->common.length = sizeof(hostdata->madapter_info);
830 req->buffer = addr = dma_map_single(hostdata->dev,
831 &hostdata->madapter_info,
832 sizeof(hostdata->madapter_info),
835 if (dma_mapping_error(req->buffer)) {
836 dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
837 free_event_struct(&hostdata->pool, evt_struct);
841 spin_lock_irqsave(hostdata->host->host_lock, flags);
842 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
843 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
844 dma_unmap_single(hostdata->dev,
846 sizeof(hostdata->madapter_info),
849 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
853 * login_rsp: - Handle response to SRP login request
854 * @evt_struct: srp_event_struct with the response
 * Used as a "done" callback when sending srp_login.  Gets called
857 * by ibmvscsi_handle_crq()
859 static void login_rsp(struct srp_event_struct *evt_struct)
861 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
862 switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
863 case SRP_LOGIN_RSP: /* it worked! */
865 case SRP_LOGIN_REJ: /* refused! */
866 dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
867 evt_struct->xfer_iu->srp.login_rej.reason);
869 atomic_set(&hostdata->request_limit, -1);
872 dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
873 evt_struct->xfer_iu->srp.login_rsp.opcode);
875 atomic_set(&hostdata->request_limit, -1);
879 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
881 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
882 dev_err(hostdata->dev, "Invalid request_limit.\n");
884 /* Now we know what the real request-limit is.
885 * This value is set rather than added to request_limit because
886 * request_limit could have been set to -1 by this client.
888 atomic_set(&hostdata->request_limit,
889 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
891 /* If we had any pending I/Os, kick them */
892 scsi_unblock_requests(hostdata->host);
894 send_mad_adapter_info(hostdata);
899 * send_srp_login: - Sends the srp login
900 * @hostdata: ibmvscsi_host_data of host
902 * Returns zero if successful.
904 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
908 struct srp_login_req *login;
909 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
911 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
915 init_event_struct(evt_struct,
920 login = &evt_struct->iu.srp.login_req;
921 memset(login, 0x00, sizeof(struct srp_login_req));
922 login->opcode = SRP_LOGIN_REQ;
923 login->req_it_iu_len = sizeof(union srp_iu);
924 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
926 spin_lock_irqsave(hostdata->host->host_lock, flags);
927 /* Start out with a request limit of 1, since this is negotiated in
928 * the login request we are just sending
930 atomic_set(&hostdata->request_limit, 1);
932 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
933 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
934 dev_info(hostdata->dev, "sent SRP login\n");
939 * sync_completion: Signal that a synchronous command has completed
940 * Note that after returning from this call, the evt_struct is freed.
 * The caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
944 static void sync_completion(struct srp_event_struct *evt_struct)
946 /* copy the response back */
947 if (evt_struct->sync_srp)
948 *evt_struct->sync_srp = *evt_struct->xfer_iu;
950 complete(&evt_struct->comp);
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
957 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
959 struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
960 struct srp_tsk_mgmt *tsk_mgmt;
961 struct srp_event_struct *evt;
962 struct srp_event_struct *tmp_evt, *found_evt;
963 union viosrp_iu srp_rsp;
966 u16 lun = lun_from_dev(cmd->device);
968 /* First, find this command in our sent list so we can figure
969 * out the correct tag
971 spin_lock_irqsave(hostdata->host->host_lock, flags);
973 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
974 if (tmp_evt->cmnd == cmd) {
981 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
985 evt = get_event_struct(&hostdata->pool);
987 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
988 sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
992 init_event_struct(evt,
997 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
999 /* Set up an abort SRP command */
1000 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1001 tsk_mgmt->opcode = SRP_TSK_MGMT;
1002 tsk_mgmt->lun = ((u64) lun) << 48;
1003 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
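/* The tag of the task to abort is the event pointer we used as the
 * SRP tag when the command was originally sent. */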
1004 tsk_mgmt->task_tag = (u64) found_evt;
1006 sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
1007 tsk_mgmt->lun, tsk_mgmt->task_tag);
1009 evt->sync_srp = &srp_rsp;
1010 init_completion(&evt->comp);
1011 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1012 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1014 sdev_printk(KERN_ERR, cmd->device,
1015 "failed to send abort() event. rc=%d\n", rsp_rc);
1019 wait_for_completion(&evt->comp);
1021 /* make sure we got a good response */
1022 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1023 if (printk_ratelimit())
1024 sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
1025 srp_rsp.srp.rsp.opcode);
1029 if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1030 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1032 rsp_rc = srp_rsp.srp.rsp.status;
1035 if (printk_ratelimit())
1036 sdev_printk(KERN_WARNING, cmd->device,
1037 "abort code %d for task tag 0x%lx\n",
1038 rsp_rc, tsk_mgmt->task_tag);
/* Because we dropped the spinlock above, it's possible
 * the event is no longer in our list.  Make sure it didn't
 * complete while we were aborting.
 */
1046 spin_lock_irqsave(hostdata->host->host_lock, flags);
1048 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
1049 if (tmp_evt->cmnd == cmd) {
1050 found_evt = tmp_evt;
1055 if (found_evt == NULL) {
1056 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1057 sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
1058 tsk_mgmt->task_tag);
1062 sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
1063 tsk_mgmt->task_tag);
1065 cmd->result = (DID_ABORT << 16);
1066 list_del(&found_evt->list);
1067 unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1068 found_evt->hostdata->dev);
1069 free_event_struct(&found_evt->hostdata->pool, found_evt);
1070 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1071 atomic_inc(&hostdata->request_limit);
1076 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
1080 static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1082 struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1083 struct srp_tsk_mgmt *tsk_mgmt;
1084 struct srp_event_struct *evt;
1085 struct srp_event_struct *tmp_evt, *pos;
1086 union viosrp_iu srp_rsp;
1088 unsigned long flags;
1089 u16 lun = lun_from_dev(cmd->device);
1091 spin_lock_irqsave(hostdata->host->host_lock, flags);
1092 evt = get_event_struct(&hostdata->pool);
1094 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1095 sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
1099 init_event_struct(evt,
1104 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1106 /* Set up a lun reset SRP command */
1107 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1108 tsk_mgmt->opcode = SRP_TSK_MGMT;
1109 tsk_mgmt->lun = ((u64) lun) << 48;
1110 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1112 sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
1115 evt->sync_srp = &srp_rsp;
1116 init_completion(&evt->comp);
1117 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1118 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1120 sdev_printk(KERN_ERR, cmd->device,
1121 "failed to send reset event. rc=%d\n", rsp_rc);
1125 wait_for_completion(&evt->comp);
1127 /* make sure we got a good response */
1128 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1129 if (printk_ratelimit())
1130 sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
1131 srp_rsp.srp.rsp.opcode);
1135 if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
1136 rsp_rc = *((int *)srp_rsp.srp.rsp.data);
1138 rsp_rc = srp_rsp.srp.rsp.status;
1141 if (printk_ratelimit())
1142 sdev_printk(KERN_WARNING, cmd->device,
1143 "reset code %d for task tag 0x%lx\n",
1144 rsp_rc, tsk_mgmt->task_tag);
1148 /* We need to find all commands for this LUN that have not yet been
1149 * responded to, and fail them with DID_RESET
1151 spin_lock_irqsave(hostdata->host->host_lock, flags);
1152 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1153 if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
1155 tmp_evt->cmnd->result = (DID_RESET << 16);
1156 list_del(&tmp_evt->list);
1157 unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1158 tmp_evt->hostdata->dev);
1159 free_event_struct(&tmp_evt->hostdata->pool,
1161 atomic_inc(&hostdata->request_limit);
1162 if (tmp_evt->cmnd_done)
1163 tmp_evt->cmnd_done(tmp_evt->cmnd);
1164 else if (tmp_evt->done)
1165 tmp_evt->done(tmp_evt);
1168 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1173 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
1174 * @cmd: struct scsi_cmnd having problems
1176 static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
1178 unsigned long wait_switch = 0;
1179 struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
1181 dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
1183 ibmvscsi_reset_host(hostdata);
1185 for (wait_switch = jiffies + (init_timeout * HZ);
1186 time_before(jiffies, wait_switch) &&
1187 atomic_read(&hostdata->request_limit) < 2;) {
1192 if (atomic_read(&hostdata->request_limit) <= 0)
1199 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
1200 * @crq: Command/Response queue
1201 * @hostdata: ibmvscsi_host_data of host
1204 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1205 struct ibmvscsi_host_data *hostdata)
1208 unsigned long flags;
1209 struct srp_event_struct *evt_struct =
1210 (struct srp_event_struct *)crq->IU_data_ptr;
1211 switch (crq->valid) {
1212 case 0xC0: /* initialization */
1213 switch (crq->format) {
1214 case 0x01: /* Initialization message */
1215 dev_info(hostdata->dev, "partner initialized\n");
1216 /* Send back a response */
1217 if ((rc = ibmvscsi_send_crq(hostdata,
1218 0xC002000000000000LL, 0)) == 0) {
1220 send_srp_login(hostdata);
1222 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1226 case 0x02: /* Initialization response */
1227 dev_info(hostdata->dev, "partner initialization complete\n");
1230 send_srp_login(hostdata);
1233 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
1236 case 0xFF: /* Hypervisor telling us the connection is closed */
1237 scsi_block_requests(hostdata->host);
1238 atomic_set(&hostdata->request_limit, 0);
1239 if (crq->format == 0x06) {
1240 /* We need to re-setup the interpartition connection */
1241 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1242 purge_requests(hostdata, DID_REQUEUE);
1243 if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
1245 (ibmvscsi_send_crq(hostdata,
1246 0xC001000000000000LL, 0))) {
1247 atomic_set(&hostdata->request_limit,
1249 dev_err(hostdata->dev, "error after enable\n");
1252 dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
1255 purge_requests(hostdata, DID_ERROR);
1256 if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
1258 (ibmvscsi_send_crq(hostdata,
1259 0xC001000000000000LL, 0))) {
1260 atomic_set(&hostdata->request_limit,
1262 dev_err(hostdata->dev, "error after reset\n");
1265 scsi_unblock_requests(hostdata->host);
1267 case 0x80: /* real payload */
1270 dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
1275 /* The only kind of payload CRQs we should get are responses to
 * things we send.  Make sure this response is to something we
 * actually sent
 */
1279 if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1280 dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
1281 (void *)crq->IU_data_ptr);
1285 if (atomic_read(&evt_struct->free)) {
1286 dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
1287 (void *)crq->IU_data_ptr);
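/* Each SRP response carries a request-limit delta; add it back so
 * new commands can be sent. */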
1291 if (crq->format == VIOSRP_SRP_FORMAT)
1292 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
1293 &hostdata->request_limit);
1295 del_timer(&evt_struct->timer);
1297 if (evt_struct->done)
1298 evt_struct->done(evt_struct);
1300 dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
1303 * Lock the host_lock before messing with these structures, since we
1304 * are running in a task context
1306 spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
1307 list_del(&evt_struct->list);
1308 free_event_struct(&evt_struct->hostdata->pool, evt_struct);
1309 spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
 * ibmvscsi_do_host_config: Send the command to the server to get host
1314 * configuration data. The data is opaque to us.
1316 static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1317 unsigned char *buffer, int length)
1319 struct viosrp_host_config *host_config;
1320 struct srp_event_struct *evt_struct;
1321 unsigned long flags;
1325 evt_struct = get_event_struct(&hostdata->pool);
1327 dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
1331 init_event_struct(evt_struct,
1336 host_config = &evt_struct->iu.mad.host_config;
/* Set up a HOST_CONFIG MAD request */
1339 memset(host_config, 0x00, sizeof(*host_config));
1340 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
1341 host_config->common.length = length;
1342 host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
1346 if (dma_mapping_error(host_config->buffer)) {
1347 dev_err(hostdata->dev, "dma_mapping error getting host config\n");
1348 free_event_struct(&hostdata->pool, evt_struct);
1352 init_completion(&evt_struct->comp);
1353 spin_lock_irqsave(hostdata->host->host_lock, flags);
1354 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
1355 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1357 wait_for_completion(&evt_struct->comp);
1358 dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
1364 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1365 * @sdev: struct scsi_device device to configure
1367 * Enable allow_restart for a device if it is a disk. Adjust the
1368 * queue_depth here also as is required by the documentation for
1369 * struct scsi_host_template.
1371 static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1373 struct Scsi_Host *shost = sdev->host;
1374 unsigned long lock_flags = 0;
1376 spin_lock_irqsave(shost->host_lock, lock_flags);
1377 if (sdev->type == TYPE_DISK)
1378 sdev->allow_restart = 1;
1379 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1380 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1385 * ibmvscsi_change_queue_depth - Change the device's queue depth
1386 * @sdev: scsi device struct
1387 * @qdepth: depth to set
1392 static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1394 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1395 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1397 scsi_adjust_queue_depth(sdev, 0, qdepth);
1398 return sdev->queue_depth;
1401 /* ------------------------------------------------------------
1404 static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
1406 struct Scsi_Host *shost = class_to_shost(class_dev);
1407 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1410 len = snprintf(buf, PAGE_SIZE, "%s\n",
1411 hostdata->madapter_info.srp_version);
1415 static struct class_device_attribute ibmvscsi_host_srp_version = {
1417 .name = "srp_version",
1420 .show = show_host_srp_version,
1423 static ssize_t show_host_partition_name(struct class_device *class_dev,
1426 struct Scsi_Host *shost = class_to_shost(class_dev);
1427 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1430 len = snprintf(buf, PAGE_SIZE, "%s\n",
1431 hostdata->madapter_info.partition_name);
1435 static struct class_device_attribute ibmvscsi_host_partition_name = {
1437 .name = "partition_name",
1440 .show = show_host_partition_name,
1443 static ssize_t show_host_partition_number(struct class_device *class_dev,
1446 struct Scsi_Host *shost = class_to_shost(class_dev);
1447 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1450 len = snprintf(buf, PAGE_SIZE, "%d\n",
1451 hostdata->madapter_info.partition_number);
1455 static struct class_device_attribute ibmvscsi_host_partition_number = {
1457 .name = "partition_number",
1460 .show = show_host_partition_number,
1463 static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
1465 struct Scsi_Host *shost = class_to_shost(class_dev);
1466 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1469 len = snprintf(buf, PAGE_SIZE, "%d\n",
1470 hostdata->madapter_info.mad_version);
1474 static struct class_device_attribute ibmvscsi_host_mad_version = {
1476 .name = "mad_version",
1479 .show = show_host_mad_version,
1482 static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
1484 struct Scsi_Host *shost = class_to_shost(class_dev);
1485 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1488 len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
1492 static struct class_device_attribute ibmvscsi_host_os_type = {
1497 .show = show_host_os_type,
1500 static ssize_t show_host_config(struct class_device *class_dev, char *buf)
1502 struct Scsi_Host *shost = class_to_shost(class_dev);
1503 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1505 /* returns null-terminated host config data */
1506 if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
1512 static struct class_device_attribute ibmvscsi_host_config = {
1517 .show = show_host_config,
1520 static struct class_device_attribute *ibmvscsi_attrs[] = {
1521 &ibmvscsi_host_srp_version,
1522 &ibmvscsi_host_partition_name,
1523 &ibmvscsi_host_partition_number,
1524 &ibmvscsi_host_mad_version,
1525 &ibmvscsi_host_os_type,
1526 &ibmvscsi_host_config,
1530 /* ------------------------------------------------------------
1531 * SCSI driver registration
1533 static struct scsi_host_template driver_template = {
1534 .module = THIS_MODULE,
1535 .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
1536 .proc_name = "ibmvscsi",
1537 .queuecommand = ibmvscsi_queuecommand,
1538 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1539 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1540 .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
1541 .slave_configure = ibmvscsi_slave_configure,
1542 .change_queue_depth = ibmvscsi_change_queue_depth,
1544 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1546 .sg_tablesize = SG_ALL,
1547 .use_clustering = ENABLE_CLUSTERING,
1548 .shost_attrs = ibmvscsi_attrs,
1552 * Called by bus code for each adapter
1554 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1556 struct ibmvscsi_host_data *hostdata;
1557 struct Scsi_Host *host;
1558 struct device *dev = &vdev->dev;
1559 struct srp_rport_identifiers ids;
1560 struct srp_rport *rport;
1561 unsigned long wait_switch = 0;
1564 vdev->dev.driver_data = NULL;
1566 driver_template.can_queue = max_requests;
1567 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1569 dev_err(&vdev->dev, "couldn't allocate host data\n");
1570 goto scsi_host_alloc_failed;
1573 host->transportt = ibmvscsi_transport_template;
1574 hostdata = shost_priv(host);
1575 memset(hostdata, 0x00, sizeof(*hostdata));
1576 INIT_LIST_HEAD(&hostdata->sent);
1577 hostdata->host = host;
1578 hostdata->dev = dev;
1579 atomic_set(&hostdata->request_limit, -1);
1580 hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
1582 rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
1583 if (rc != 0 && rc != H_RESOURCE) {
1584 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1585 goto init_crq_failed;
1587 if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
1588 dev_err(&vdev->dev, "couldn't initialize event pool\n");
1589 goto init_pool_failed;
1593 host->max_id = max_id;
1594 host->max_channel = max_channel;
1596 if (scsi_add_host(hostdata->host, hostdata->dev))
1597 goto add_host_failed;
1599 /* we don't have a proper target_port_id so let's use the fake one */
1600 memcpy(ids.port_id, hostdata->madapter_info.partition_name,
1601 sizeof(ids.port_id));
1602 ids.roles = SRP_RPORT_ROLE_TARGET;
1603 rport = srp_rport_add(host, &ids);
1605 goto add_srp_port_failed;
1607 /* Try to send an initialization message. Note that this is allowed
 * to fail if the other end is not active.  In that case we don't
 * need to "wait" for initialization
 */
1611 if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
1612 || rc == H_RESOURCE) {
1614 * Wait around max init_timeout secs for the adapter to finish
1615 * initializing. When we are done initializing, we will have a
1616 * valid request_limit. We don't want Linux scanning before
1619 for (wait_switch = jiffies + (init_timeout * HZ);
1620 time_before(jiffies, wait_switch) &&
1621 atomic_read(&hostdata->request_limit) < 2;) {
1626 /* if we now have a valid request_limit, initiate a scan */
1627 if (atomic_read(&hostdata->request_limit) > 0)
1628 scsi_scan_host(host);
1631 vdev->dev.driver_data = hostdata;
1634 add_srp_port_failed:
1635 scsi_remove_host(hostdata->host);
1637 release_event_pool(&hostdata->pool, hostdata);
1639 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
1641 scsi_host_put(host);
1642 scsi_host_alloc_failed:
1646 static int ibmvscsi_remove(struct vio_dev *vdev)
1648 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1649 release_event_pool(&hostdata->pool, hostdata);
1650 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
1653 srp_remove_host(hostdata->host);
1654 scsi_remove_host(hostdata->host);
1655 scsi_host_put(hostdata->host);
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
1664 static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1665 {"vscsi", "IBM,v-scsi"},
1668 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
1670 static struct vio_driver ibmvscsi_driver = {
1671 .id_table = ibmvscsi_device_table,
1672 .probe = ibmvscsi_probe,
1673 .remove = ibmvscsi_remove,
1676 .owner = THIS_MODULE,
1680 static struct srp_function_template ibmvscsi_transport_functions = {
1683 int __init ibmvscsi_module_init(void)
1687 ibmvscsi_transport_template =
1688 srp_attach_transport(&ibmvscsi_transport_functions);
1689 if (!ibmvscsi_transport_template)
1692 ret = vio_register_driver(&ibmvscsi_driver);
1694 srp_release_transport(ibmvscsi_transport_template);
1698 void __exit ibmvscsi_module_exit(void)
1700 vio_unregister_driver(&ibmvscsi_driver);
1701 srp_release_transport(ibmvscsi_transport_template);
1704 module_init(ibmvscsi_module_init);
1705 module_exit(ibmvscsi_module_exit);