5 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6 * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
8 * (C) Copyright 2013 IBM Corporation
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include "rsxx_priv.h"
29 struct list_head list;
31 unsigned int laddr; /* Logical address on the ramsan */
38 unsigned int pg_off; /* Page Offset */
43 /* This timeout is used to detect a stalled DMA channel */
44 #define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
54 enum rsxx_dma_status {
64 u8 sub_page; /* Bit[0:2]: 512byte offset */
65 /* Bit[4:6]: 512byte count */
71 HW_CMD_BLK_DISCARD = 0x70,
72 HW_CMD_BLK_WRITE = 0x80,
73 HW_CMD_BLK_READ = 0xC0,
74 HW_CMD_BLK_RECON_READ = 0xE0,
79 HW_STATUS_HARD_ERR = 0x02,
80 HW_STATUS_SOFT_ERR = 0x04,
81 HW_STATUS_FAULT = 0x08,
84 #define STATUS_BUFFER_SIZE8 4096
85 #define COMMAND_BUFFER_SIZE8 4096
87 static struct kmem_cache *rsxx_dma_pool;
94 #define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
95 (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
97 struct dma_tracker_list {
100 struct dma_tracker list[0];
104 /*----------------- Misc Utility Functions -------------------*/
/*
 * Translate a raw byte address on the card into a logical block address:
 * the target-select bits are stripped out using the _stripe upper/lower
 * masks and shift, then the byte address is divided by RSXX_HW_BLK_SIZE.
 * NOTE(review): the return statement and closing brace are elided in this
 * excerpt; presumably tgt_addr8 is returned — confirm against full source.
 */
105 static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
107 	unsigned long long tgt_addr8;
109 	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
110 		card->_stripe.upper_mask) |
111 		((addr8) & card->_stripe.lower_mask);
112 	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
/*
 * Select the DMA target (channel) that owns byte address addr8, using the
 * stripe target shift/mask set up in rsxx_dma_stripe_setup().
 * NOTE(review): declaration of 'tgt' and the return are elided here.
 */
116 static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
120 	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
/* Reset every DMA command/status queue on the card via the RESET register. */
125 static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
127 	/* Reset all DMA Command/Status Queues */
128 	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
/*
 * Size of this DMA transfer in bytes: a non-zero sub-page count is in
 * 512-byte sectors (hence << 9); zero means one full hardware block.
 */
131 static unsigned int get_dma_size(struct rsxx_dma *dma)
133 	if (dma->sub_page.cnt)
134 		return dma->sub_page.cnt << 9;
136 	return RSXX_HW_BLK_SIZE;
140 /*----------------- DMA Tracker -------------------*/
/*
 * Associate a DMA with a tracker tag slot.  No locking visible here —
 * presumably the caller owns the tag (obtained via pop_tracker); confirm.
 * NOTE(review): the 'tag' parameter line is elided in this excerpt.
 */
141 static void set_tracker_dma(struct dma_tracker_list *trackers,
143 			    struct rsxx_dma *dma)
145 	trackers->list[tag].dma = dma;
/* Look up the DMA (if any) currently associated with a tracker tag. */
148 static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
151 	return trackers->list[tag].dma;
/*
 * Take a free tag off the head of the tracker free-list, under the
 * trackers lock.  The popped slot's next_tag is poisoned to -1.
 * NOTE(review): the 'return tag' statement is elided in this excerpt;
 * a -1 head presumably signals "no free tags" to the caller — confirm.
 */
154 static int pop_tracker(struct dma_tracker_list *trackers)
158 	spin_lock(&trackers->lock);
159 	tag = trackers->head;
161 		trackers->head = trackers->list[tag].next_tag;
162 		trackers->list[tag].next_tag = -1;
164 	spin_unlock(&trackers->lock);
/*
 * Return a tag to the head of the tracker free-list and clear its DMA
 * association, under the trackers lock.
 */
169 static void push_tracker(struct dma_tracker_list *trackers, int tag)
171 	spin_lock(&trackers->lock);
172 	trackers->list[tag].next_tag = trackers->head;
173 	trackers->head = tag;
174 	trackers->list[tag].dma = NULL;
175 	spin_unlock(&trackers->lock);
179 /*----------------- Interrupt Coalescing -------------*/
181 * Interrupt Coalescing Register Format:
182 * Interrupt Timer (64ns units) [15:0]
183 * Interrupt Count [24:16]
186 #define INTR_COAL_LATENCY_MASK (0x0000ffff)
188 #define INTR_COAL_COUNT_SHIFT 16
189 #define INTR_COAL_COUNT_BITS 9
190 #define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
191 INTR_COAL_COUNT_SHIFT)
192 #define INTR_COAL_LATENCY_UNITS_NS 64
/*
 * Build the INTR_COAL register value: interrupt count in bits [24:16],
 * latency (converted from ns into 64ns hardware units) in bits [15:0].
 * NOTE(review): the body of the RSXX_INTR_COAL_DISABLED early-return is
 * elided in this excerpt; presumably 0 is returned to disable coalescing.
 */
195 static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
197 	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
199 	if (mode == RSXX_INTR_COAL_DISABLED)
202 	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
203 		(latency_units & INTR_COAL_LATENCY_MASK);
/*
 * Re-tune interrupt coalescing based on the current total hardware queue
 * depth across all targets.  Only acts in RSXX_INTR_COAL_AUTO_TUNE mode.
 * NOTE(review): the 'count' argument line to dma_intr_coal_val() (original
 * line 220) is elided in this excerpt; presumably it is derived from
 * q_depth — confirm against full source.
 */
207 static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
213 	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
216 	for (i = 0; i < card->n_targets; i++)
217 		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
219 	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
221 				      card->config.data.intr_coal.latency);
222 	iowrite32(intr_coal, card->regmap + INTR_COAL);
225 /*----------------- RSXX DMA Handling -------------------*/
/*
 * Final completion path for a DMA: log any error status bits
 * (rate-limited), unmap the data page with the direction implied by the
 * command, invoke the caller's completion callback (status collapsed to
 * 0 = success / 1 = any error), then free the descriptor back to the
 * rsxx_dma slab pool.
 * NOTE(review): the 'status' parameter line and the PCI_DMA_FROMDEVICE
 * branch of the direction ternary are elided in this excerpt.
 */
226 static void rsxx_complete_dma(struct rsxx_cardinfo *card,
227 			      struct rsxx_dma *dma,
230 	if (status & DMA_SW_ERR)
231 		printk_ratelimited(KERN_ERR
232 				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
233 				   dma->cmd, dma->laddr);
234 	if (status & DMA_HW_FAULT)
235 		printk_ratelimited(KERN_ERR
236 				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
237 				   dma->cmd, dma->laddr);
238 	if (status & DMA_CANCELLED)
239 		printk_ratelimited(KERN_ERR
240 				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
241 				   dma->cmd, dma->laddr);
244 	pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
245 		       dma->cmd == HW_CMD_BLK_WRITE ?
250 	dma->cb(card, dma->cb_data, status ? 1 : 0);
252 	kmem_cache_free(rsxx_dma_pool, dma);
/*
 * Put a DMA back on the channel's software queue.  list_add() (not
 * list_add_tail()) is deliberate: requeued DMAs go to the FRONT so they
 * are reissued ahead of newly queued work.
 */
255 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
256 			     struct rsxx_dma *dma)
259 	 * Requeued DMAs go to the front of the queue so they are issued
262 	spin_lock(&ctrl->queue_lock);
263 	list_add(&dma->list, &ctrl->queue);
264 	spin_unlock(&ctrl->queue_lock);
/*
 * Classify a hardware error status for a completed DMA and decide its
 * fate.  Per-bit statistics (CRC/hard/soft) are bumped first, then the
 * command type determines the action:
 *  - READ with CRC/hard error: retried as a RECON_READ if the card allows
 *    hard scrubbing, otherwise failed with DMA_HW_FAULT;
 *  - RECON_READ with CRC/hard error: data unreconstructable, failed;
 *  - WRITE / DISCARD errors: failed and counted;
 *  - anything else: logged and flagged DMA_SW_ERR.
 * NOTE(review): the 'hw_st' parameter line, the 'switch (dma->cmd)' line,
 * the break statements, and the final requeue-vs-complete condition
 * (original line ~330) are elided in this excerpt — the visible tail
 * suggests a retry path calls rsxx_requeue_dma() and the error path calls
 * rsxx_complete_dma(); confirm against full source.
 */
267 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
268 				  struct rsxx_dma *dma,
271 	unsigned int status = 0;
274 	dev_dbg(CARD_TO_DEV(ctrl->card),
275 		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
276 		dma->cmd, dma->laddr, hw_st);
278 	if (hw_st & HW_STATUS_CRC)
279 		ctrl->stats.crc_errors++;
280 	if (hw_st & HW_STATUS_HARD_ERR)
281 		ctrl->stats.hard_errors++;
282 	if (hw_st & HW_STATUS_SOFT_ERR)
283 		ctrl->stats.soft_errors++;
286 	case HW_CMD_BLK_READ:
287 		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
288 			if (ctrl->card->scrub_hard) {
289 				dma->cmd = HW_CMD_BLK_RECON_READ;
291 				ctrl->stats.reads_retried++;
293 				status |= DMA_HW_FAULT;
294 				ctrl->stats.reads_failed++;
296 		} else if (hw_st & HW_STATUS_FAULT) {
297 			status |= DMA_HW_FAULT;
298 			ctrl->stats.reads_failed++;
302 	case HW_CMD_BLK_RECON_READ:
303 		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
304 			/* Data could not be reconstructed. */
305 			status |= DMA_HW_FAULT;
306 			ctrl->stats.reads_failed++;
310 	case HW_CMD_BLK_WRITE:
311 		status |= DMA_HW_FAULT;
312 		ctrl->stats.writes_failed++;
315 	case HW_CMD_BLK_DISCARD:
316 		status |= DMA_HW_FAULT;
317 		ctrl->stats.discards_failed++;
321 		dev_err(CARD_TO_DEV(ctrl->card),
322 			"Unknown command in DMA!(cmd: x%02x "
323 			   "laddr x%08x st: x%02x\n",
324 			   dma->cmd, dma->laddr, hw_st);
325 		status |= DMA_SW_ERR;
331 		rsxx_requeue_dma(ctrl, dma);
333 		rsxx_complete_dma(ctrl->card, dma, status);
/*
 * Activity-timer callback: fires when a channel has had commands
 * outstanding for DMA_ACTIVITY_TIMEOUT without completing.  If the
 * hardware's view of SW_CMD_IDX disagrees with ours, the PIO write was
 * lost — reissue it and re-arm the timer.  Otherwise the channel is
 * genuinely stalled, so fault the whole DMA interface.
 * NOTE(review): an early-return for hw_q_depth == 0 (body elided) and the
 * channel id argument to the warning are not visible in this excerpt.
 */
336 static void dma_engine_stalled(unsigned long data)
338 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
340 	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
343 	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
345 		 * The dma engine was stalled because the SW_CMD_IDX write
346 		 * was lost. Issue it again to recover.
348 		dev_warn(CARD_TO_DEV(ctrl->card),
349 			"SW_CMD_IDX write was lost, re-writing...\n");
350 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
351 		mod_timer(&ctrl->activity_timer,
352 			  jiffies + DMA_ACTIVITY_TIMEOUT);
354 		dev_warn(CARD_TO_DEV(ctrl->card),
355 			"DMA channel %d has stalled, faulting interface.\n",
357 		ctrl->card->dma_fault = 1;
/*
 * Issue worker (runs on ctrl->issue_wq): drains the channel's software
 * queue into the hardware command ring.  For each DMA it pops a tracker
 * tag, dequeues the oldest entry, fills the next hw_cmd slot (command,
 * tag, packed sub-page off/cnt, little-endian device and host addresses),
 * and advances cmd.idx modulo RSXX_CS_IDX_MASK.  DMAs that arrive after a
 * dma_fault are cancelled instead of issued.  After the loop, a wmb()
 * orders the coherent-buffer writes before the SW_CMD_IDX doorbell, the
 * hw_q_depth counter is bumped, and the stall-watchdog timer is armed.
 * NOTE(review): the issue loop's enclosing 'while'/'do' construct, the
 * no-free-tag handling after pop_tracker(), the cmds_pending increment,
 * and the 'if (cmds_pending)' guard are elided in this excerpt.
 */
361 static void rsxx_issue_dmas(struct work_struct *work)
363 	struct rsxx_dma_ctrl *ctrl;
364 	struct rsxx_dma *dma;
366 	int cmds_pending = 0;
367 	struct hw_cmd *hw_cmd_buf;
369 	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
370 	hw_cmd_buf = ctrl->cmd.buf;
372 	if (unlikely(ctrl->card->halt))
376 		spin_lock(&ctrl->queue_lock);
377 		if (list_empty(&ctrl->queue)) {
378 			spin_unlock(&ctrl->queue_lock);
381 		spin_unlock(&ctrl->queue_lock);
383 		tag = pop_tracker(ctrl->trackers);
387 		spin_lock(&ctrl->queue_lock);
388 		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
389 		list_del(&dma->list);
390 		ctrl->stats.sw_q_depth--;
391 		spin_unlock(&ctrl->queue_lock);
394 		 * This will catch any DMAs that slipped in right before the
395 		 * fault, but was queued after all the other DMAs were
398 		if (unlikely(ctrl->card->dma_fault)) {
399 			push_tracker(ctrl->trackers, tag);
400 			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
404 		set_tracker_dma(ctrl->trackers, tag, dma);
405 		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
406 		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
407 		hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
408 		hw_cmd_buf[ctrl->cmd.idx].sub_page =
409 					((dma->sub_page.cnt & 0x7) << 4) |
410 					 (dma->sub_page.off & 0x7);
412 		hw_cmd_buf[ctrl->cmd.idx].device_addr =
413 					cpu_to_le32(dma->laddr);
415 		hw_cmd_buf[ctrl->cmd.idx].host_addr =
416 					cpu_to_le64(dma->dma_addr);
418 		dev_dbg(CARD_TO_DEV(ctrl->card),
419 			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
420 			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
422 		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
425 		if (dma->cmd == HW_CMD_BLK_WRITE)
426 			ctrl->stats.writes_issued++;
427 		else if (dma->cmd == HW_CMD_BLK_DISCARD)
428 			ctrl->stats.discards_issued++;
430 			ctrl->stats.reads_issued++;
433 	/* Let HW know we've queued commands. */
436 		 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
437 		 * (which is in PCI-consistent system-memory) from the loop
438 		 * above make it into the coherency domain before the
439 		 * following PIO "trigger" updating the cmd.idx.  A WMB is
440 		 * sufficient. We need not explicitly CPU cache-flush since
441 		 * the memory is a PCI-consistent (ie; coherent) mapping.
445 		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
446 		mod_timer(&ctrl->activity_timer,
447 			  jiffies + DMA_ACTIVITY_TIMEOUT);
448 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
/*
 * Completion worker (runs on ctrl->done_wq): walks the hardware status
 * ring while each entry's count matches the driver's expected event count
 * (ctrl->e_cnt).  For each entry it reads the tag and status, resolves the
 * tag to a DMA via the tracker table — a missing tracker is a fatal
 * inconsistency (all DMA interrupts disabled, error logged) — then
 * decrements hw_q_depth, routes error statuses to
 * rsxx_handle_dma_error() and clean ones to rsxx_complete_dma(), frees
 * the tag, and advances status.idx.  Afterwards it re-tunes interrupt
 * coalescing, stops the stall watchdog if the hardware queue drained,
 * re-enables this channel's interrupt, and kicks the issue worker if
 * software-queued work remains.
 * NOTE(review): declarations (count, tag, status, flags), the rmb() the
 * in-loop comment refers to, the e_cnt increment, and the status-ring
 * index mask are elided in this excerpt.
 */
452 static void rsxx_dma_done(struct work_struct *work)
454 	struct rsxx_dma_ctrl *ctrl;
455 	struct rsxx_dma *dma;
460 	struct hw_status *hw_st_buf;
462 	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
463 	hw_st_buf = ctrl->status.buf;
465 	if (unlikely(ctrl->card->halt) ||
466 	    unlikely(ctrl->card->dma_fault))
469 	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
471 	while (count == ctrl->e_cnt) {
473 		 * The read memory-barrier is necessary to keep aggressive
474 		 * processors/optimizers (such as the PPC Apple G5) from
475 		 * reordering the following status-buffer tag & status read
476 		 * *before* the count read on subsequent iterations of the
481 		status = hw_st_buf[ctrl->status.idx].status;
482 		tag    = hw_st_buf[ctrl->status.idx].tag;
484 		dma = get_tracker_dma(ctrl->trackers, tag);
486 			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
487 			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
488 			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
490 			dev_err(CARD_TO_DEV(ctrl->card),
491 				"No tracker for tag %d "
493 				tag, ctrl->status.idx, ctrl->id);
497 		dev_dbg(CARD_TO_DEV(ctrl->card),
499 			"(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
500 			ctrl->id, dma->laddr, tag, status, count,
503 		atomic_dec(&ctrl->stats.hw_q_depth);
505 		mod_timer(&ctrl->activity_timer,
506 			  jiffies + DMA_ACTIVITY_TIMEOUT);
509 			rsxx_handle_dma_error(ctrl, dma, status);
511 			rsxx_complete_dma(ctrl->card, dma, 0);
513 		push_tracker(ctrl->trackers, tag);
515 		ctrl->status.idx = (ctrl->status.idx + 1) &
519 		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
522 	dma_intr_coal_auto_tune(ctrl->card);
524 	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
525 		del_timer_sync(&ctrl->activity_timer);
527 	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
528 	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
529 	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
531 	spin_lock(&ctrl->queue_lock);
532 	if (ctrl->stats.sw_q_depth)
533 		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
534 	spin_unlock(&ctrl->queue_lock);
/*
 * Tear down every DMA still sitting on list 'q': unlink it, unmap its
 * page (direction implied by the command), and free it to the slab pool.
 * Caller holds whatever lock protects 'q' (see rsxx_dma_destroy()).
 * NOTE(review): the 'q' parameter line, a per-entry counter, its return,
 * and the FROMDEVICE branch of the direction ternary are elided in this
 * excerpt; the int return presumably reports the number cleaned up.
 */
537 static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
540 	struct rsxx_dma *dma;
541 	struct rsxx_dma *tmp;
544 	list_for_each_entry_safe(dma, tmp, q, list) {
545 		list_del(&dma->list);
548 			pci_unmap_page(card->dev, dma->dma_addr,
550 				       (dma->cmd == HW_CMD_BLK_WRITE) ?
553 		kmem_cache_free(rsxx_dma_pool, dma);
/*
 * Allocate and queue a DISCARD descriptor on list 'q'.  Discards carry no
 * data, so sub_page off/cnt are zeroed and no page is mapped.
 * NOTE(review): the laddr/cb parameter lines, the allocation-failure
 * check, the laddr/cb field assignments, and the return are elided in
 * this excerpt; the kmem_cache_alloc() result should be NULL-checked.
 */
560 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
566 	struct rsxx_dma *dma;
568 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
572 	dma->cmd          = HW_CMD_BLK_DISCARD;
575 	dma->sub_page.off = 0;
576 	dma->sub_page.cnt = 0;
580 	dma->cb_data      = cb_data;
582 	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
584 	list_add_tail(&dma->list, q);
/*
 * Allocate, map, and queue a READ or WRITE descriptor on list 'q'.
 * 'dir' selects WRITE (non-zero) vs READ; sub_page off/cnt are recorded
 * in 512-byte sectors (>> 9).  The page is DMA-mapped here and unmapped
 * in rsxx_complete_dma()/rsxx_cleanup_dma_queue().
 * NOTE(review): checking pci_map_page() failure with '!dma->dma_addr'
 * assumes a failed mapping is 0; the API-correct test is
 * pci_dma_mapping_error() — flag for follow-up.
 * NOTE(review): several parameter lines (dir, laddr, page, cb) and field
 * assignments (laddr, page, cb) plus the returns are elided in this
 * excerpt.
 */
589 static int rsxx_queue_dma(struct rsxx_cardinfo *card,
592 			  unsigned int dma_off,
593 			  unsigned int dma_len,
600 	struct rsxx_dma *dma;
602 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
606 	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
607 				     dir ? PCI_DMA_TODEVICE :
609 	if (!dma->dma_addr) {
610 		kmem_cache_free(rsxx_dma_pool, dma);
614 	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
616 	dma->sub_page.off = (dma_off >> 9);
617 	dma->sub_page.cnt = (dma_len >> 9);
619 	dma->pg_off       = pg_off;
621 	dma->cb_data      = cb_data;
623 	dev_dbg(CARD_TO_DEV(card),
624 		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
625 		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
626 		dma->sub_page.cnt, dma->page, dma->pg_off);
629 	list_add_tail(&dma->list, q);
/*
 * Split an incoming bio into per-target DMA descriptors and hand them to
 * the channel issue workers.  DISCARD bios are chopped into
 * RSXX_HW_BLK_SIZE-sized discard descriptors; data bios are walked
 * segment-by-segment, each segment split at hardware-block boundaries
 * (dma_len = min(bv_len, RSXX_HW_BLK_SIZE - dma_off)).  Descriptors are
 * first staged on per-target local lists, then spliced onto each
 * channel's queue under its queue_lock, and the issue work is queued.
 * On a queueing error, all already-staged lists are cleaned up.
 * NOTE(review): parameter lines (bio, n_dmas, cb, cb_data), inner loop
 * constructs, dma_cnt accounting, error-check lines after the
 * rsxx_queue_* calls, and the returns are elided in this excerpt.
 */
634 int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
640 	struct list_head dma_list[RSXX_MAX_TARGETS];
641 	struct bio_vec *bvec;
642 	unsigned long long addr8;
646 	unsigned int dma_off;
647 	unsigned int dma_len;
648 	int dma_cnt[RSXX_MAX_TARGETS];
653 	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
654 	atomic_set(n_dmas, 0);
656 	for (i = 0; i < card->n_targets; i++) {
657 		INIT_LIST_HEAD(&dma_list[i]);
661 	if (bio->bi_rw & REQ_DISCARD) {
662 		bv_len = bio->bi_size;
665 			tgt   = rsxx_get_dma_tgt(card, addr8);
666 			laddr = rsxx_addr8_to_laddr(addr8, card);
668 			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
675 			addr8  += RSXX_HW_BLK_SIZE;
676 			bv_len -= RSXX_HW_BLK_SIZE;
679 		bio_for_each_segment(bvec, bio, i) {
680 			bv_len = bvec->bv_len;
681 			bv_off = bvec->bv_offset;
684 				tgt   = rsxx_get_dma_tgt(card, addr8);
685 				laddr = rsxx_addr8_to_laddr(addr8, card);
686 				dma_off = addr8 & RSXX_HW_BLK_MASK;
687 				dma_len = min(bv_len,
688 					      RSXX_HW_BLK_SIZE - dma_off);
690 				st = rsxx_queue_dma(card, &dma_list[tgt],
693 						    laddr, bvec->bv_page,
694 						    bv_off, cb, cb_data);
707 	for (i = 0; i < card->n_targets; i++) {
708 		if (!list_empty(&dma_list[i])) {
709 			spin_lock(&card->ctrl[i].queue_lock);
710 			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
711 			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
712 			spin_unlock(&card->ctrl[i].queue_lock);
714 			queue_work(card->ctrl[i].issue_wq,
715 				   &card->ctrl[i].issue_dma_work);
722 	for (i = 0; i < card->n_targets; i++)
723 		rsxx_cleanup_dma_queue(card, &dma_list[i]);
729 /*----------------- DMA Engine Initialization & Setup -------------------*/
/*
 * One-time initialization of a single DMA channel: allocate the
 * PCI-coherent status and command rings, build the tracker free-list
 * (tags 0..RSXX_MAX_OUTSTANDING_CMDS-1 chained, last terminated with -1),
 * set up locks, the stall-watchdog timer, the ordered issue/done
 * workqueues, and the two work items.  The rings are poisoned (0xac /
 * 0x83) before their bus addresses are programmed into SB_ADD_* /
 * CB_ADD_*, and the driver's status/cmd indices are synchronized with the
 * hardware's (rejecting out-of-range values read back from the card).
 * NOTE(review): the error-cleanup labels/returns, the allocation-failure
 * checks after vmalloc()/alloc_ordered_workqueue(), and the final
 * 'return 0' are elided in this excerpt.
 */
730 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
731 			      struct rsxx_dma_ctrl *ctrl)
735 	memset(&ctrl->stats, 0, sizeof(ctrl->stats));
737 	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
738 						&ctrl->status.dma_addr);
739 	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
740 					     &ctrl->cmd.dma_addr);
741 	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
744 	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
748 	ctrl->trackers->head = 0;
749 	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
750 		ctrl->trackers->list[i].next_tag = i + 1;
751 		ctrl->trackers->list[i].dma = NULL;
753 	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
754 	spin_lock_init(&ctrl->trackers->lock);
756 	spin_lock_init(&ctrl->queue_lock);
757 	INIT_LIST_HEAD(&ctrl->queue);
759 	setup_timer(&ctrl->activity_timer, dma_engine_stalled,
760 					(unsigned long)ctrl);
762 	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
766 	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
770 	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
771 	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
773 	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
774 	iowrite32(lower_32_bits(ctrl->status.dma_addr),
775 		  ctrl->regmap + SB_ADD_LO);
776 	iowrite32(upper_32_bits(ctrl->status.dma_addr),
777 		  ctrl->regmap + SB_ADD_HI);
779 	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
780 	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
781 	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
783 	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
784 	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
785 		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
789 	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
790 	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
792 	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
793 	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
794 		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
798 	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
799 	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
/*
 * Derive the address-striping masks/shifts from the configured stripe
 * size (in bytes).  Requires stripe_size8 to be a power of two so the
 * masks can be computed with -1 / ffs() arithmetic: lower_mask selects
 * the within-stripe offset, target_mask/target_shift extract the target
 * index, and upper_shift/upper_mask recover the upper address bits.
 * NOTE(review): the error return after the power-of-2 check and the final
 * 'return 0' are elided in this excerpt; this also appears to assume
 * card->n_targets is a power of two (ffs(n_targets) - 1) — confirm.
 */
806 static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
807 				 unsigned int stripe_size8)
809 	if (!is_power_of_2(stripe_size8)) {
810 		dev_err(CARD_TO_DEV(card),
811 			"stripe_size is NOT a power of 2!\n");
815 	card->_stripe.lower_mask = stripe_size8 - 1;
817 	card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
818 	card->_stripe.upper_shift = ffs(card->n_targets) - 1;
820 	card->_stripe.target_mask = card->n_targets - 1;
821 	card->_stripe.target_shift = ffs(stripe_size8) - 1;
823 	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
824 		card->_stripe.lower_mask);
825 	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
826 		card->_stripe.upper_shift);
827 	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
828 		card->_stripe.upper_mask);
829 	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
830 		card->_stripe.target_mask);
831 	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
832 		card->_stripe.target_shift);
/*
 * Apply the card's saved configuration to the DMA engine: program the
 * interrupt-coalescing register from the config, then set up address
 * striping from the configured stripe size (whose result is returned).
 */
837 static int rsxx_dma_configure(struct rsxx_cardinfo *card)
841 	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
842 				      card->config.data.intr_coal.count,
843 				      card->config.data.intr_coal.latency);
844 	iowrite32(intr_coal, card->regmap + INTR_COAL);
846 	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
/*
 * Top-level DMA bring-up for a card: carve the register map into per-
 * channel 4K windows, reset the hardware queues, initialize every channel
 * via rsxx_dma_ctrl_init() (unwinding on failure), default scrub_hard on,
 * apply any valid saved configuration, and finally enable the per-channel
 * DMA interrupts.  The trailing loop is the failed_dma_setup unwind path:
 * it tears down workqueues, trackers, and the coherent status/command
 * rings for every channel touched so far.
 * NOTE(review): the 'goto failed_dma_setup' target label, several
 * intermediate checks (st, ctrl->done_wq), and both return statements are
 * elided in this excerpt.
 */
849 int rsxx_dma_setup(struct rsxx_cardinfo *card)
855 	dev_info(CARD_TO_DEV(card),
856 		"Initializing %d DMA targets\n",
859 	/* Regmap is divided up into 4K chunks. One for each DMA channel */
860 	for (i = 0; i < card->n_targets; i++)
861 		card->ctrl[i].regmap = card->regmap + (i * 4096);
865 	/* Reset the DMA queues */
866 	rsxx_dma_queue_reset(card);
868 	/************* Setup DMA Control *************/
869 	for (i = 0; i < card->n_targets; i++) {
870 		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
872 			goto failed_dma_setup;
874 		card->ctrl[i].card = card;
875 		card->ctrl[i].id = i;
878 	card->scrub_hard = 1;
880 	if (card->config_valid)
881 		rsxx_dma_configure(card);
883 	/* Enable the interrupts after all setup has completed. */
884 	for (i = 0; i < card->n_targets; i++) {
885 		spin_lock_irqsave(&card->irq_lock, flags);
886 		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
887 		spin_unlock_irqrestore(&card->irq_lock, flags);
893 	for (i = 0; i < card->n_targets; i++) {
894 		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
896 		if (ctrl->issue_wq) {
897 			destroy_workqueue(ctrl->issue_wq);
898 			ctrl->issue_wq = NULL;
902 			destroy_workqueue(ctrl->done_wq);
903 			ctrl->done_wq = NULL;
907 		vfree(ctrl->trackers);
909 		if (ctrl->status.buf)
910 			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
912 					    ctrl->status.dma_addr);
914 			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
915 					    ctrl->cmd.buf, ctrl->cmd.dma_addr);
/*
 * Full teardown of a card's DMA engine, per channel: destroy both
 * workqueues, cancel the stall watchdog, drain and free everything still
 * on the software queue (under queue_lock, via rsxx_cleanup_dma_queue()),
 * then walk the tracker table to unmap and free DMAs already issued to
 * hardware, and finally release the trackers and the coherent
 * status/command rings.
 * NOTE(review): loop-variable declarations, NULL checks on the tracker
 * lookup, the per-channel freed-DMA counters, and the FROMDEVICE branch
 * of the unmap direction are elided in this excerpt.
 */
922 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
924 	struct rsxx_dma_ctrl *ctrl;
925 	struct rsxx_dma *dma;
929 	for (i = 0; i < card->n_targets; i++) {
930 		ctrl = &card->ctrl[i];
932 		if (ctrl->issue_wq) {
933 			destroy_workqueue(ctrl->issue_wq);
934 			ctrl->issue_wq = NULL;
938 			destroy_workqueue(ctrl->done_wq);
939 			ctrl->done_wq = NULL;
942 		if (timer_pending(&ctrl->activity_timer))
943 			del_timer_sync(&ctrl->activity_timer);
945 		/* Clean up the DMA queue */
946 		spin_lock(&ctrl->queue_lock);
947 		cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
948 		spin_unlock(&ctrl->queue_lock);
951 			dev_info(CARD_TO_DEV(card),
952 				"Freed %d queued DMAs on channel %d\n",
955 		/* Clean up issued DMAs */
956 		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
957 			dma = get_tracker_dma(ctrl->trackers, j);
959 				pci_unmap_page(card->dev, dma->dma_addr,
961 					       (dma->cmd == HW_CMD_BLK_WRITE) ?
964 				kmem_cache_free(rsxx_dma_pool, dma);
970 			dev_info(CARD_TO_DEV(card),
971 				"Freed %d pending DMAs on channel %d\n",
974 		vfree(ctrl->trackers);
976 		pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
977 				    ctrl->status.buf, ctrl->status.dma_addr);
978 		pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
979 				    ctrl->cmd.buf, ctrl->cmd.dma_addr);
/*
 * Module-level init: create the cache-aligned slab pool used for all
 * struct rsxx_dma descriptors.
 * NOTE(review): the NULL check on the cache and the return statements are
 * elided in this excerpt; KMEM_CACHE() failure must be handled.
 */
984 int rsxx_dma_init(void)
986 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
/* Module-level teardown: destroy the rsxx_dma descriptor slab pool. */
994 void rsxx_dma_cleanup(void)
996 	kmem_cache_destroy(rsxx_dma_pool);