#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
+#include <linux/kthread.h>
#include <asm/semaphore.h>
#include <scsi/scsi.h>
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
cmd->scsi_done = done;
+ cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0);
}
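/*
 * Note on the ownership tag set above: SCp.phase records who currently
 * holds the command. aac_queuecommand() tags it AAC_OWNER_LOWLEVEL when
 * the driver accepts it; the submission path presumably retags it
 * AAC_OWNER_FIRMWARE once the request has been handed to the adapter,
 * which is what the error-handler scan further down tests for.
 */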
static int aac_slave_configure(struct scsi_device *sdev)
{
- struct Scsi_Host *host = sdev->host;
+ if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
+ sdev->skip_ms_page_8 = 1;
+ sdev->skip_ms_page_3f = 1;
+ }
+ if ((sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+ if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+ sdev->no_uld_attach = 1;
+ }
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num_lsu = 0;
+ unsigned num_one = 0;
+ unsigned depth;
- if (sdev->tagged_supported)
- scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128);
- else
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (sdev_channel(dev) == CONTAINER_CHANNEL))
+ ++num_lsu;
+ else
+ ++num_one;
+ }
+ if (num_lsu == 0)
+ ++num_lsu;
+ depth = (host->can_queue - num_one) / num_lsu;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
+ if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
+ AAC_OPT_NEW_COMM))
+ blk_queue_max_segment_size(sdev->request_queue, 65536);
+ } else
scsi_adjust_queue_depth(sdev, 0, 1);
- if (!(((struct aac_dev *)host->hostdata)->adapter_info.options
- & AAC_OPT_NEW_COMM))
- blk_queue_max_segment_size(sdev->request_queue, 65536);
-
return 0;
}
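/*
 * Worked example of the queue-depth calculation above (numbers are
 * hypothetical): with host->can_queue = 512, one non-container device
 * (num_one = 1) and four container devices (num_lsu = 4), each
 * container gets (512 - 1) / 4 = 127 outstanding commands, which is
 * kept as-is because it already falls inside the [2, 256] clamp.
 */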
__shost_for_each_device(dev, host) {
spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->serial_number) {
+ if ((command != cmd) &&
+ (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
active++;
break;
}
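/*
 * In the scan above, a non-zero 'active' presumably tells the error
 * handler that some other command is still held by the adapter
 * firmware. Testing SCp.phase against AAC_OWNER_FIRMWARE (and skipping
 * the command being recovered) is a more precise check than the old
 * non-zero serial_number test, which only indicated that the command
 * had been issued at some point.
 */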
/*
 * If the quirk31 bit is set, the adapter needs adapter
 * to driver communication memory to be allocated below 2gig
 */
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
- if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) ||
- pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL))
+ if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
+ pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
goto out;
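/*
 * DMA_31BIT_MASK is 0x7FFFFFFFULL, so the replacement above is purely a
 * readability change: adapters with the AAC_QUIRK_31BIT quirk still get
 * both their streaming and coherent DMA masks limited to the low 2 GiB,
 * matching the comment about communication memory below 2gig.
 */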
pci_set_master(pdev);
/*
* Start any kernel threads needed
*/
- aac->thread_pid = kernel_thread((int (*)(void *))aac_command_thread,
- aac, 0);
- if (aac->thread_pid < 0) {
+ aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
+ if (IS_ERR(aac->thread)) {
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+ error = PTR_ERR(aac->thread);
goto out_deinit;
}
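/*
 * Minimal sketch of the kthread pattern used above (all names below are
 * illustrative, not part of the driver): kthread_run() returns either a
 * task_struct pointer or an ERR_PTR() value, hence the IS_ERR()/PTR_ERR()
 * handling, and kthread_stop() later asks the thread to exit and waits
 * for it, replacing the old kill_proc()/wait_for_completion() pair in the
 * teardown paths further down.
 */
#if 0	/* illustration only */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *example_task;

static int example_thread(void *data)
{
	/* run until kthread_stop() is called */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int example_start(void)
{
	example_task = kthread_run(example_thread, NULL, "example");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	return 0;
}

static void example_stop(void)
{
	kthread_stop(example_task);	/* waits for the thread to exit */
}
#endif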
/*
* max channel will be the physical channels plus 1 virtual channel
- * all containers are on the virtual channel 0
+ * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
* physical channels are addressed by their actual physical number+1
*/
if (aac->nondasd_support == 1)
return 0;
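/*
 * Example of the channel layout described above (bus count is
 * hypothetical): on a two-bus adapter, containers show up on virtual
 * channel 0 (CONTAINER_CHANNEL), physical bus 0 is reported as
 * channel 1, and physical bus 1 as channel 2.
 */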
out_deinit:
- kill_proc(aac->thread_pid, SIGKILL, 0);
- wait_for_completion(&aac->aif_completion);
-
+ kthread_stop(aac->thread);
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
free_irq(pdev->irq, aac);
out_unmap:
- fib_map_free(aac);
+ aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
kfree(aac->queues);
iounmap(aac->regs.sa);
scsi_remove_host(shost);
- kill_proc(aac->thread_pid, SIGKILL, 0);
- wait_for_completion(&aac->aif_completion);
+ kthread_stop(aac->thread);
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
- fib_map_free(aac);
+ aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
kfree(aac->queues);