/* drivers/staging/unisys/visorhba/visorhba_main.c */
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2  * All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <linux/idr.h>
20 #include <linux/seq_file.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_device.h>
25
26 #include "visorbus.h"
27 #include "iochannel.h"
28
/* The Send and Receive Buffers of the IO Queue may both be full */

/* value stored into a disk's ios_threshold counter once its error_count
 * saturates (see the error/abort paths below)
 */
#define IOS_ERROR_THRESHOLD     1000
/* capacity of the per-device pending[] request array */
#define MAX_PENDING_REQUESTS    (MIN_NUMSIGNALS * 2)
/* cap on a disk's error_count; beyond this, ios_threshold is set instead */
#define VISORHBA_ERROR_COUNT    30
34
/* debugfs directory for this driver (parent of per-device entries) */
static struct dentry *visorhba_debugfs_dir;

/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VHBA channel.
	 */
	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
	{ NULL_UUID_LE, NULL }	/* NULL entry terminates the table */
};

MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
48
/* Per-disk bookkeeping, kept on a singly linked list whose dummy head is
 * embedded in visorhba_devdata (see for_each_vdisk_match).
 */
struct visordisk_info {
	u32 valid;
	u32 channel, id, lun;	/* Disk Path */
	atomic_t ios_threshold;	/* set to IOS_ERROR_THRESHOLD on repeated errors */
	atomic_t error_count;	/* bounded by VISORHBA_ERROR_COUNT */
	struct visordisk_info *next;	/* next disk; NULL terminates the list */
};
56
/* One slot of the per-device pending-request ring; a slot is in use
 * exactly when ->sent is non-NULL.
 */
struct scsipending {
	struct uiscmdrsp cmdrsp;
	void *sent;             /* The Data being tracked */
	char cmdtype;           /* Type of pointer that is being stored */
};
62
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;		/* IOVM is down: new requests are refused */
	bool serverchangingstate;	/* IOVM up/down transition in progress */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;	/* feature flags, dumped via debugfs */
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	/* dummy head of the visordisk_info list; real entries hang off
	 * head.next (see for_each_vdisk_match)
	 */
	struct visordisk_info head;
	unsigned int max_buff_len;	/* largest scsi bufflen seen so far */
	int devnum;
	struct task_struct *thread;	/* worker thread; stopped on serverdown */
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
99
/* NOTE(review): no users of this struct are visible in this file chunk --
 * confirm it is still needed or remove it.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
103
/* Iterate @iter over @list's vdisk chain, executing the following
 * statement/block only for entries whose channel/id/lun equal @match's.
 *
 * NOTE(review): iteration starts at the embedded dummy head (&list->head),
 * whose fields are zero, so a device at channel 0 / id 0 / lun 0 can match
 * the dummy head itself; and because the loop condition is iter->next, the
 * tail node is never tested.  Confirm both behaviors are intended before
 * relying on this macro for membership tests.
 *
 * The trailing bare `if` has no `else`, so callers must not attach one.
 */
#define for_each_vdisk_match(iter, list, match)                   \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) &&                  \
		    (iter->id == match->id) &&                    \
		    (iter->lun == match->lun))
109 /**
110  *      visor_thread_start - starts a thread for the device
111  *      @threadfn: Function the thread starts
112  *      @thrcontext: Context to pass to the thread, i.e. devdata
113  *      @name: string describing name of thread
114  *
115  *      Starts a thread for the device.
116  *
117  *      Return the task_struct * denoting the thread on success,
118  *             or NULL on failure
119  */
120 static struct task_struct *visor_thread_start
121 (int (*threadfn)(void *), void *thrcontext, char *name)
122 {
123         struct task_struct *task;
124
125         task = kthread_run(threadfn, thrcontext, "%s", name);
126         if (IS_ERR(task)) {
127                 pr_err("visorbus failed to start thread\n");
128                 return NULL;
129         }
130         return task;
131 }
132
/**
 *	visor_thread_stop - stop a thread started by visor_thread_start()
 *	@task: thread to stop; NULL means no thread is running (no-op)
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (task)
		kthread_stop(task);
}
142
143 /**
144  *      add_scsipending_entry - save off io command that is pending in
145  *                              Service Partition
146  *      @devdata: Pointer to devdata
147  *      @cmdtype: Specifies the type of command pending
148  *      @new:   The command to be saved
149  *
150  *      Saves off the io command that is being handled by the Service
151  *      Partition so that it can be handled when it completes. If new is
152  *      NULL it is assumed the entry refers only to the cmdrsp.
153  *      Returns insert_location where entry was added,
154  *      -EBUSY if it can't
155  */
156 static int add_scsipending_entry(struct visorhba_devdata *devdata,
157                                  char cmdtype, void *new)
158 {
159         unsigned long flags;
160         struct scsipending *entry;
161         int insert_location;
162
163         spin_lock_irqsave(&devdata->privlock, flags);
164         insert_location = devdata->nextinsert;
165         while (devdata->pending[insert_location].sent) {
166                 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
167                 if (insert_location == (int)devdata->nextinsert) {
168                         spin_unlock_irqrestore(&devdata->privlock, flags);
169                         return -EBUSY;
170                 }
171         }
172
173         entry = &devdata->pending[insert_location];
174         memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
175         entry->cmdtype = cmdtype;
176         if (new)
177                 entry->sent = new;
178         else /* wants to send cmdrsp */
179                 entry->sent = &entry->cmdrsp;
180         devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
181         spin_unlock_irqrestore(&devdata->privlock, flags);
182
183         return insert_location;
184 }
185
186 /**
187  *      del_scsipending_enty - removes an entry from the pending array
188  *      @devdata: Device holding the pending array
189  *      @del: Entry to remove
190  *
191  *      Removes the entry pointed at by del and returns it.
192  *      Returns the scsipending entry pointed at
193  */
194 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
195                                  int del)
196 {
197         unsigned long flags;
198         void *sent;
199
200         if (del >= MAX_PENDING_REQUESTS)
201                 return NULL;
202
203         spin_lock_irqsave(&devdata->privlock, flags);
204         sent = devdata->pending[del].sent;
205
206         devdata->pending[del].cmdtype = 0;
207         devdata->pending[del].sent = NULL;
208         spin_unlock_irqrestore(&devdata->privlock, flags);
209
210         return sent;
211 }
212
213 /**
214  *      get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
215  *      #ddata: Device holding the pending array
216  *      @ent: Entry that stores the cmdrsp
217  *
218  *      Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
219  *      if the "sent" field is not NULL
220  *      Returns a pointer to the cmdrsp.
221  */
222 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
223                                                 int ent)
224 {
225         if (ddata->pending[ent].sent)
226                 return &ddata->pending[ent].cmdrsp;
227
228         return NULL;
229 }
230
231 /**
232  *      simple_idr_get - associate a provided pointer with an int value
233  *                       1 <= value <= INT_MAX, and return this int value;
234  *                       the pointer value can be obtained later by passing
235  *                       this int value to idr_find()
236  *      @idrtable: the data object maintaining the pointer<-->int mappings
237  *      @p: the pointer value to be remembered
238  *      @lock: a spinlock used when exclusive access to idrtable is needed
239  */
240 static unsigned int simple_idr_get(struct idr *idrtable, void *p,
241                                    spinlock_t *lock)
242 {
243         int id;
244         unsigned long flags;
245
246         idr_preload(GFP_KERNEL);
247         spin_lock_irqsave(lock, flags);
248         id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
249         spin_unlock_irqrestore(lock, flags);
250         idr_preload_end();
251         if (id < 0)
252                 return 0;  /* failure */
253         return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
254 }
255
256 /**
257  *      setup_scsitaskmgmt_handles - stash the necessary handles so that the
258  *                                   completion processing logic for a taskmgmt
259  *                                   cmd will be able to find who to wake up
260  *                                   and where to stash the result
261  */
262 static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
263                                        struct uiscmdrsp *cmdrsp,
264                                        wait_queue_head_t *event, int *result)
265 {
266         /* specify the event that has to be triggered when this */
267         /* cmd is complete */
268         cmdrsp->scsitaskmgmt.notify_handle =
269                 simple_idr_get(idrtable, event, lock);
270         cmdrsp->scsitaskmgmt.notifyresult_handle =
271                 simple_idr_get(idrtable, result, lock);
272 }
273
274 /**
275  *      cleanup_scsitaskmgmt_handles - forget handles created by
276  *                                     setup_scsitaskmgmt_handles()
277  */
278 static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
279                                          struct uiscmdrsp *cmdrsp)
280 {
281         if (cmdrsp->scsitaskmgmt.notify_handle)
282                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
283         if (cmdrsp->scsitaskmgmt.notifyresult_handle)
284                 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
285 }
286
/**
 *	forward_taskmgmt_command - send taskmegmt command to the Service
 *				   Partition
 *	@tasktype: Type of taskmgmt command
 *	@scsicmd: scsi command that triggered the taskmgmt request
 *
 *	Create a cmdrsp packet and send it to the Serivce Partition
 *	that will service this request.
 *	Returns SUCCESS when the IOVM completed the command in time,
 *	FAILED otherwise.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
                                    struct scsi_cmnd *scsicmd)
{
        struct uiscmdrsp *cmdrsp;
        struct scsi_device *scsidev = scsicmd->device;
        struct visorhba_devdata *devdata =
                (struct visorhba_devdata *)scsidev->host->hostdata;
        int notifyresult = 0xffff;	/* sentinel: overwritten on completion */
        wait_queue_head_t notifyevent;
        int scsicmd_id = 0;

	/* IOVM is unavailable; nothing can service the request */
        if (devdata->serverdown || devdata->serverchangingstate)
                return FAILED;

	/* reserve a pending slot; NULL means the slot's own cmdrsp is sent */
        scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
                                           NULL);
        if (scsicmd_id < 0)
                return FAILED;

        cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

        init_waitqueue_head(&notifyevent);

        /* issue TASK_MGMT_ABORT_TASK */
        cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* NOTE(review): notifyevent/notifyresult live on this stack frame.
	 * Both exit paths remove the idr handles before returning, so a
	 * late IOVM response finds no completion context -- confirm no
	 * other path can look the handles up after this function returns.
	 */
        setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
                                   &notifyevent, &notifyresult);

        /* save destination */
        cmdrsp->scsitaskmgmt.tasktype = tasktype;
        cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
        cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
        cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
        cmdrsp->scsitaskmgmt.handle = scsicmd_id;

        dev_dbg(&scsidev->sdev_gendev,
                "visorhba: initiating type=%d taskmgmt command\n", tasktype);
        if (visorchannel_signalinsert(devdata->dev->visorchannel,
                                      IOCHAN_TO_IOPART,
                                      cmdrsp))
                goto err_del_scsipending_ent;

        /* It can take the Service Partition up to 35 seconds to complete
         * an IO in some cases, so wait 45 seconds and error out
         */
        if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
                                msecs_to_jiffies(45000)))
                goto err_del_scsipending_ent;

        dev_dbg(&scsidev->sdev_gendev,
                "visorhba: taskmgmt type=%d success; result=0x%x\n",
                 tasktype, notifyresult);
        if (tasktype == TASK_MGMT_ABORT_TASK)
                scsicmd->result = DID_ABORT << 16;
        else
                scsicmd->result = DID_RESET << 16;

	/* NOTE(review): completing scsicmd directly from the EH path --
	 * confirm the midlayer expects scsi_done here rather than only
	 * the SUCCESS return value.
	 */
        scsicmd->scsi_done(scsicmd);
        cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
        return SUCCESS;

err_del_scsipending_ent:
        dev_dbg(&scsidev->sdev_gendev,
                "visorhba: taskmgmt type=%d not executed\n", tasktype);
        del_scsipending_ent(devdata, scsicmd_id);
        cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
        return FAILED;
}
365
366 /**
367  *      visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
368  *      @scsicmd: The scsicmd that needs aborted
369  *
370  *      Returns SUCCESS if inserted, failure otherwise
371  *
372  */
373 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
374 {
375         /* issue TASK_MGMT_ABORT_TASK */
376         struct scsi_device *scsidev;
377         struct visordisk_info *vdisk;
378         struct visorhba_devdata *devdata;
379
380         scsidev = scsicmd->device;
381         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
382         for_each_vdisk_match(vdisk, devdata, scsidev) {
383                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
384                         atomic_inc(&vdisk->error_count);
385                 else
386                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
387         }
388         return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
389 }
390
391 /**
392  *      visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
393  *      @scsicmd: The scsicmd that needs aborted
394  *
395  *      Returns SUCCESS if inserted, failure otherwise
396  */
397 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
398 {
399         /* issue TASK_MGMT_LUN_RESET */
400         struct scsi_device *scsidev;
401         struct visordisk_info *vdisk;
402         struct visorhba_devdata *devdata;
403
404         scsidev = scsicmd->device;
405         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
406         for_each_vdisk_match(vdisk, devdata, scsidev) {
407                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
408                         atomic_inc(&vdisk->error_count);
409                 else
410                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
411         }
412         return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
413 }
414
415 /**
416  *      visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
417  *                                   target on the bus
418  *      @scsicmd: The scsicmd that needs aborted
419  *
420  *      Returns SUCCESS
421  */
422 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
423 {
424         struct scsi_device *scsidev;
425         struct visordisk_info *vdisk;
426         struct visorhba_devdata *devdata;
427
428         scsidev = scsicmd->device;
429         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
430         for_each_vdisk_match(vdisk, devdata, scsidev) {
431                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
432                         atomic_inc(&vdisk->error_count);
433                 else
434                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
435         }
436         return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
437 }
438
439 /**
440  *      visorhba_host_reset_handler - Not supported
441  *      @scsicmd: The scsicmd that needs aborted
442  *
443  *      Not supported, return SUCCESS
444  *      Returns SUCCESS
445  */
446 static int
447 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
448 {
449         /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
450         return SUCCESS;
451 }
452
/**
 *	visorhba_get_info - identification string for this host adapter
 *	@shp: Scsi host that is requesting information (unused)
 *
 *	Returns the static driver name string.
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
464
/**
 *	visorhba_queue_command_lck -- queues command to the Service Partition
 *	@scsicmd: Command to be queued
 *	@visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 *	Queues to scsicmd to the ServicePartition after converting it to a
 *	uiscmdrsp structure.
 *
 *	Returns success if queued to the Service Partition, otherwise
 *	failure.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
                           void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
        struct uiscmdrsp *cmdrsp;
        struct scsi_device *scsidev = scsicmd->device;
        int insert_location;
        unsigned char *cdb = scsicmd->cmnd;
        struct Scsi_Host *scsihost = scsidev->host;
        unsigned int i;
        struct visorhba_devdata *devdata =
                (struct visorhba_devdata *)scsihost->hostdata;
        struct scatterlist *sg = NULL;
        struct scatterlist *sglist = NULL;

	/* refuse new work while the IOVM is down or transitioning */
        if (devdata->serverdown || devdata->serverchangingstate)
                return SCSI_MLQUEUE_DEVICE_BUSY;

	/* reserve a pending slot; its index doubles as the completion
	 * handle the IOVM echoes back
	 */
        insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
                                                (void *)scsicmd);

        if (insert_location < 0)
                return SCSI_MLQUEUE_DEVICE_BUSY;

        cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

        cmdrsp->cmdtype = CMD_SCSI_TYPE;
        /* save the pending insertion location. Deletion from pending
         * will return the scsicmd pointer for completion
         */
        cmdrsp->scsi.handle = insert_location;

        /* save done function that we have call when cmd is complete */
        scsicmd->scsi_done = visorhba_cmnd_done;
        /* save destination */
        cmdrsp->scsi.vdest.channel = scsidev->channel;
        cmdrsp->scsi.vdest.id = scsidev->id;
        cmdrsp->scsi.vdest.lun = scsidev->lun;
        /* save datadir */
        cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	/* NOTE(review): copies a fixed MAX_CMND_SIZE bytes regardless of
	 * scsicmd->cmd_len -- confirm the IOVM ignores trailing bytes
	 */
        memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

        cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

        /* keep track of the max buffer length so far. */
        if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
                devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* more scatter-gather entries than gpi_list can carry */
        if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
                goto err_del_scsipending_ent;

        /* convert buffer to phys information  */
        /* buffer is scatterlist - copy it out */
        sglist = scsi_sglist(scsicmd);

        for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
                cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
                cmdrsp->scsi.gpi_list[i].length = sg->length;
        }
        cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

        if (visorchannel_signalinsert(devdata->dev->visorchannel,
                                      IOCHAN_TO_IOPART,
                                      cmdrsp))
                /* queue must be full and we aren't going to wait */
                goto err_del_scsipending_ent;

        return 0;

err_del_scsipending_ent:
        del_scsipending_ent(devdata, insert_location);
        return SCSI_MLQUEUE_DEVICE_BUSY;
}
549
/* Wrap the _lck variant with the midlayer's standard locking shim when
 * DEF_SCSI_QCMD is available; otherwise use the _lck function directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
555
556 /**
557  *      visorhba_slave_alloc - called when new disk is discovered
558  *      @scsidev: New disk
559  *
560  *      Create a new visordisk_info structure and add it to our
561  *      list of vdisks.
562  *
563  *      Returns success when created, otherwise error.
564  */
565 static int visorhba_slave_alloc(struct scsi_device *scsidev)
566 {
567         /* this is called by the midlayer before scan for new devices --
568          * LLD can alloc any struct & do init if needed.
569          */
570         struct visordisk_info *vdisk;
571         struct visordisk_info *tmpvdisk;
572         struct visorhba_devdata *devdata;
573         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
574
575         devdata = (struct visorhba_devdata *)scsihost->hostdata;
576         if (!devdata)
577                 return 0; /* even though we errored, treat as success */
578
579         for_each_vdisk_match(vdisk, devdata, scsidev)
580                 return 0; /* already allocated return success */
581
582         tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
583         if (!tmpvdisk)
584                 return -ENOMEM;
585
586         tmpvdisk->channel = scsidev->channel;
587         tmpvdisk->id = scsidev->id;
588         tmpvdisk->lun = scsidev->lun;
589         vdisk->next = tmpvdisk;
590         return 0;
591 }
592
593 /**
594  *      visorhba_slave_destroy - disk is going away
595  *      @scsidev: scsi device going away
596  *
597  *      Disk is going away, clean up resources.
598  *      Returns void.
599  */
600 static void visorhba_slave_destroy(struct scsi_device *scsidev)
601 {
602         /* midlevel calls this after device has been quiesced and
603          * before it is to be deleted.
604          */
605         struct visordisk_info *vdisk, *delvdisk;
606         struct visorhba_devdata *devdata;
607         struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
608
609         devdata = (struct visorhba_devdata *)scsihost->hostdata;
610         for_each_vdisk_match(vdisk, devdata, scsidev) {
611                 delvdisk = vdisk->next;
612                 vdisk->next = delvdisk->next;
613                 kfree(delvdisk);
614                 return;
615         }
616 }
617
/* SCSI midlayer hooks for this host adapter */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* maximum simultaneously outstanding commands accepted from the midlayer */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,	/* NOTE(review): compare against MAX_PHYS_INFO */
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
635
/**
 *	info_debugfs_show - debugfs interface to dump visorhba states
 *
 *	This presents a file in the debugfs tree named:
 *	    /visorhba/vbus<x>:dev<y>/info
 *
 *	Dumps the per-device statistics counters and, when available,
 *	the channel feature-flags register.
 */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
        struct visorhba_devdata *devdata = seq->private;

        seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
        seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
        seq_printf(seq, "interrupts_disabled = %llu\n",
                   devdata->interrupts_disabled);
        seq_printf(seq, "interrupts_notme = %llu\n",
                   devdata->interrupts_notme);
        seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
        if (devdata->flags_addr) {
		/* report the physical address alongside the mapped one */
                u64 phys_flags_addr =
                        virt_to_phys((__force  void *)devdata->flags_addr);
                seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
                           phys_flags_addr);
		/* NOTE(review): %llu with an __le64 cast -- confirm the
		 * register is native-endian here, else this misprints
		 */
                seq_printf(seq, "FeatureFlags = %llu\n",
                           (__le64)readq(devdata->flags_addr));
        }
        seq_printf(seq, "acquire_failed_cnt = %llu\n",
                   devdata->acquire_failed_cnt);

        return 0;
}
666
667 static int info_debugfs_open(struct inode *inode, struct file *file)
668 {
669         return single_open(file, info_debugfs_show, inode->i_private);
670 }
671
/* file operations for the "info" debugfs file (standard seq_file plumbing) */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
679
680 /**
681  *      complete_taskmgmt_command - complete task management
682  *      @cmdrsp: Response from the IOVM
683  *
684  *      Service Partition returned the result of the task management
685  *      command. Wake up anyone waiting for it.
686  *      Returns void
687  */
688 static inline void complete_taskmgmt_command
689 (struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
690 {
691         wait_queue_head_t *wq =
692                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
693         int *scsi_result_ptr =
694                 idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
695
696         if (unlikely(!(wq && scsi_result_ptr))) {
697                 pr_err("visorhba: no completion context; cmd will time out\n");
698                 return;
699         }
700
701         /* copy the result of the taskmgmt and
702          * wake up the error handler that is waiting for this
703          */
704         pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
705         *scsi_result_ptr = result;
706         wake_up_all(wq);
707 }
708
/**
 *	visorhba_serverdown_complete - Called when we are done cleaning up
 *				       from serverdown
 *	@devdata: device whose IOVM went down
 *
 *	Called when we are done cleanning up from serverdown, stop processing
 *	queue, fail pending IOs.
 *	Returns void when finished cleaning up
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
        int i;
        struct scsipending *pendingdel = NULL;
        struct scsi_cmnd *scsicmd = NULL;
        struct uiscmdrsp *cmdrsp;
        unsigned long flags;

        /* Stop using the IOVM response queue (queue should be drained
         * by the end)
         */
        visor_thread_stop(devdata->thread);

        /* Fail commands that weren't completed */
        spin_lock_irqsave(&devdata->privlock, flags);
        for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
                pendingdel = &devdata->pending[i];
		/* interpret each occupied slot by its stored cmdtype */
                switch (pendingdel->cmdtype) {
                case CMD_SCSI_TYPE:
			/* complete outstanding scsi cmds as if reset */
                        scsicmd = pendingdel->sent;
                        scsicmd->result = DID_RESET << 16;
                        if (scsicmd->scsi_done)
                                scsicmd->scsi_done(scsicmd);
                        break;
                case CMD_SCSITASKMGMT_TYPE:
			/* wake any waiter with a failure result */
                        cmdrsp = pendingdel->sent;
                        complete_taskmgmt_command(&devdata->idr, cmdrsp,
                                                  TASK_MGMT_FAILED);
                        break;
                default:
			/* free slot: nothing to fail */
                        break;
                }
                pendingdel->cmdtype = 0;
                pendingdel->sent = NULL;
        }
        spin_unlock_irqrestore(&devdata->privlock, flags);

	/* mark down AFTER draining so new submissions were already refused
	 * via serverchangingstate
	 */
        devdata->serverdown = true;
        devdata->serverchangingstate = false;
}
758
759 /**
760  *      visorhba_serverdown - Got notified that the IOVM is down
761  *      @devdata: visorhba that is being serviced by downed IOVM.
762  *
763  *      Something happened to the IOVM, return immediately and
764  *      schedule work cleanup work.
765  *      Return SUCCESS or EINVAL
766  */
767 static int visorhba_serverdown(struct visorhba_devdata *devdata)
768 {
769         if (!devdata->serverdown && !devdata->serverchangingstate) {
770                 devdata->serverchangingstate = true;
771                 visorhba_serverdown_complete(devdata);
772         } else if (devdata->serverchangingstate) {
773                 return -EINVAL;
774         }
775         return 0;
776 }
777
778 /**
779  *      do_scsi_linuxstat - scsi command returned linuxstat
780  *      @cmdrsp: response from IOVM
781  *      @scsicmd: Command issued.
782  *
783  *      Don't log errors for disk-not-present inquiries
784  *      Returns void
785  */
786 static void
787 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
788 {
789         struct visorhba_devdata *devdata;
790         struct visordisk_info *vdisk;
791         struct scsi_device *scsidev;
792
793         scsidev = scsicmd->device;
794         memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
795
796         /* Do not log errors for disk-not-present inquiries */
797         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
798             (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
799             (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
800                 return;
801         /* Okay see what our error_count is here.... */
802         devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
803         for_each_vdisk_match(vdisk, devdata, scsidev) {
804                 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
805                         atomic_inc(&vdisk->error_count);
806                         atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
807                 }
808         }
809 }
810
811 static int set_no_disk_inquiry_result(unsigned char *buf,
812                                       size_t len, bool is_lun0)
813 {
814         if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
815                 return -EINVAL;
816         memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
817         buf[2] = SCSI_SPC2_VER;
818         if (is_lun0) {
819                 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
820                 buf[3] = DEV_HISUPPORT;
821         } else {
822                 buf[0] = DEV_NOT_CAPABLE;
823         }
824         buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
825         strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
826         return 0;
827 }
828
829 /**
830  *      do_scsi_nolinuxstat - scsi command didn't have linuxstat
831  *      @cmdrsp: response from IOVM
832  *      @scsicmd: Command issued.
833  *
834  *      Handle response when no linuxstat was returned
835  *      Returns void
836  */
837 static void
838 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
839 {
840         struct scsi_device *scsidev;
841         unsigned char buf[36];
842         struct scatterlist *sg;
843         unsigned int i;
844         char *this_page;
845         char *this_page_orig;
846         int bufind = 0;
847         struct visordisk_info *vdisk;
848         struct visorhba_devdata *devdata;
849
850         scsidev = scsicmd->device;
851         if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
852             (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
853                 if (cmdrsp->scsi.no_disk_result == 0)
854                         return;
855
856                 /* Linux scsi code wants a device at Lun 0
857                  * to issue report luns, but we don't want
858                  * a disk there so we'll present a processor
859                  * there.
860                  */
861                 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
862                                            scsidev->lun == 0);
863
864                 if (scsi_sg_count(scsicmd) == 0) {
865                         memcpy(scsi_sglist(scsicmd), buf,
866                                cmdrsp->scsi.bufflen);
867                         return;
868                 }
869
870                 sg = scsi_sglist(scsicmd);
871                 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
872                         this_page_orig = kmap_atomic(sg_page(sg + i));
873                         this_page = (void *)((unsigned long)this_page_orig |
874                                              sg[i].offset);
875                         memcpy(this_page, buf + bufind, sg[i].length);
876                         kunmap_atomic(this_page_orig);
877                 }
878         } else {
879                 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
880                 for_each_vdisk_match(vdisk, devdata, scsidev) {
881                         if (atomic_read(&vdisk->ios_threshold) > 0) {
882                                 atomic_dec(&vdisk->ios_threshold);
883                                 if (atomic_read(&vdisk->ios_threshold) == 0)
884                                         atomic_set(&vdisk->error_count, 0);
885                         }
886                 }
887         }
888 }
889
890 /**
891  *      complete_scsi_command - complete a scsi command
892  *      @uiscmdrsp: Response from Service Partition
893  *      @scsicmd: The scsi command
894  *
895  *      Response returned by the Service Partition, finish it and send
896  *      completion to the scsi midlayer.
897  *      Returns void.
898  */
899 static void
900 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
901 {
902         /* take what we need out of cmdrsp and complete the scsicmd */
903         scsicmd->result = cmdrsp->scsi.linuxstat;
904         if (cmdrsp->scsi.linuxstat)
905                 do_scsi_linuxstat(cmdrsp, scsicmd);
906         else
907                 do_scsi_nolinuxstat(cmdrsp, scsicmd);
908
909         scsicmd->scsi_done(scsicmd);
910 }
911
912 /**
913  *      drain_queue - pull responses out of iochannel
914  *      @cmdrsp: Response from the IOSP
915  *      @devdata: device that owns this iochannel
916  *
917  *      Pulls responses out of the iochannel and process the responses.
918  *      Restuns void
919  */
920 static void
921 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
922 {
923         struct scsi_cmnd *scsicmd;
924
925         while (1) {
926                 if (visorchannel_signalremove(devdata->dev->visorchannel,
927                                               IOCHAN_FROM_IOPART,
928                                               cmdrsp))
929                         break; /* queue empty */
930
931                 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
932                         /* scsicmd location is returned by the
933                          * deletion
934                          */
935                         scsicmd = del_scsipending_ent(devdata,
936                                                       cmdrsp->scsi.handle);
937                         if (!scsicmd)
938                                 break;
939                         /* complete the orig cmd */
940                         complete_scsi_command(cmdrsp, scsicmd);
941                 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
942                         if (!del_scsipending_ent(devdata,
943                                                  cmdrsp->scsitaskmgmt.handle))
944                                 break;
945                         complete_taskmgmt_command(&devdata->idr, cmdrsp,
946                                                   cmdrsp->scsitaskmgmt.result);
947                 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
948                         dev_err_once(&devdata->dev->device,
949                                      "ignoring unsupported NOTIFYGUEST\n");
950                 /* cmdrsp is now available for re-use */
951         }
952 }
953
954 /**
955  *      process_incoming_rsps - Process responses from IOSP
956  *      @v: void pointer to visorhba_devdata
957  *
958  *      Main function for the thread that processes the responses
959  *      from the IO Service Partition. When the queue is empty, wait
960  *      to check to see if it is full again.
961  */
962 static int process_incoming_rsps(void *v)
963 {
964         struct visorhba_devdata *devdata = v;
965         struct uiscmdrsp *cmdrsp = NULL;
966         const int size = sizeof(*cmdrsp);
967
968         cmdrsp = kmalloc(size, GFP_ATOMIC);
969         if (!cmdrsp)
970                 return -ENOMEM;
971
972         while (1) {
973                 if (kthread_should_stop())
974                         break;
975                 wait_event_interruptible_timeout(
976                         devdata->rsp_queue, (atomic_read(
977                                              &devdata->interrupt_rcvd) == 1),
978                                 msecs_to_jiffies(devdata->thread_wait_ms));
979                 /* drain queue */
980                 drain_queue(cmdrsp, devdata);
981         }
982         kfree(cmdrsp);
983         return 0;
984 }
985
986 /**
987  *      visorhba_pause - function to handle visorbus pause messages
988  *      @dev: device that is pausing.
989  *      @complete_func: function to call when finished
990  *
991  *      Something has happened to the IO Service Partition that is
992  *      handling this device. Quiet this device and reset commands
993  *      so that the Service Partition can be corrected.
994  *      Returns SUCCESS
995  */
996 static int visorhba_pause(struct visor_device *dev,
997                           visorbus_state_complete_func complete_func)
998 {
999         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1000
1001         visorhba_serverdown(devdata);
1002         complete_func(dev, 0);
1003         return 0;
1004 }
1005
1006 /**
1007  *      visorhba_resume - function called when the IO Service Partition is back
1008  *      @dev: device that is pausing.
1009  *      @complete_func: function to call when finished
1010  *
1011  *      Yay! The IO Service Partition is back, the channel has been wiped
1012  *      so lets re-establish connection and start processing responses.
1013  *      Returns 0 on success, error on failure.
1014  */
1015 static int visorhba_resume(struct visor_device *dev,
1016                            visorbus_state_complete_func complete_func)
1017 {
1018         struct visorhba_devdata *devdata;
1019
1020         devdata = dev_get_drvdata(&dev->device);
1021         if (!devdata)
1022                 return -EINVAL;
1023
1024         if (devdata->serverdown && !devdata->serverchangingstate)
1025                 devdata->serverchangingstate = true;
1026
1027         devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1028                                              "vhba_incming");
1029
1030         devdata->serverdown = false;
1031         devdata->serverchangingstate = false;
1032
1033         return 0;
1034 }
1035
/**
 *	visorhba_probe - device has been discovered, do acquire
 *	@dev: visor_device that was discovered
 *
 *	A new HBA was discovered, do the initial connections of it.
 *	Return 0 on success, otherwise error.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int err, channel_offset;
	u64 features;

	/* devdata lives in the hostdata area of the Scsi_Host allocation */
	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* read the size limits the IOVM advertises in its channel */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned int)max.max_id;
	scsihost->max_lun = (unsigned int)max.max_lun;
	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors counts 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	/* NOTE(review): the debugfs "info" file is created before devdata
	 * is fully initialized below — confirm info_debugfs_fops tolerates
	 * reading a partially-initialized devdata.
	 */
	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
						  visorhba_debugfs_dir);
	if (!devdata->debugfs_dir) {
		err = -ENOMEM;
		goto err_scsi_remove_host;
	}
	devdata->debugfs_info =
		debugfs_create_file("info", S_IRUSR | S_IRGRP,
				    devdata->debugfs_dir, devdata,
				    &info_debugfs_fops);
	if (!devdata->debugfs_info) {
		err = -ENOMEM;
		goto err_debugfs_dir;
	}

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* mark the channel as polled; responses are reaped by our kthread */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;

	idr_init(&devdata->idr);

	/* NOTE(review): visor_thread_start() result is not checked —
	 * confirm it cannot fail, or handle a NULL/error return.
	 */
	devdata->thread_wait_ms = 2;
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_debugfs_info:
	debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
	debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1134
1135 /**
1136  *      visorhba_remove - remove a visorhba device
1137  *      @dev: Device to remove
1138  *
1139  *      Removes the visorhba device.
1140  *      Returns void.
1141  */
1142 static void visorhba_remove(struct visor_device *dev)
1143 {
1144         struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1145         struct Scsi_Host *scsihost = NULL;
1146
1147         if (!devdata)
1148                 return;
1149
1150         scsihost = devdata->scsihost;
1151         visor_thread_stop(devdata->thread);
1152         scsi_remove_host(scsihost);
1153         scsi_host_put(scsihost);
1154
1155         idr_destroy(&devdata->idr);
1156
1157         dev_set_drvdata(&dev->device, NULL);
1158         debugfs_remove(devdata->debugfs_info);
1159         debugfs_remove_recursive(devdata->debugfs_dir);
1160 }
1161
/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* no interrupt callback: the channel is run in polling mode and
	 * responses are reaped by the process_incoming_rsps kthread
	 */
	.channel_interrupt = NULL,
};
1176
1177 /**
1178  *      visorhba_init           - driver init routine
1179  *
1180  *      Initialize the visorhba driver and register it with visorbus
1181  *      to handle s-Par virtual host bus adapter.
1182  */
1183 static int visorhba_init(void)
1184 {
1185         int rc = -ENOMEM;
1186
1187         visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1188         if (!visorhba_debugfs_dir)
1189                 return -ENOMEM;
1190
1191         rc = visorbus_register_visor_driver(&visorhba_driver);
1192         if (rc)
1193                 goto cleanup_debugfs;
1194
1195         return 0;
1196
1197 cleanup_debugfs:
1198         debugfs_remove_recursive(visorhba_debugfs_dir);
1199
1200         return rc;
1201 }
1202
/**
 *	visorhba_exit		- driver exit routine
 *
 *	Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1213
/* Module entry/exit points and metadata. */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");