 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *          Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

#include <linux/slab.h>
#include "rsxx_priv.h"

        struct list_head        list;
        unsigned int            laddr;  /* Logical address */
        unsigned int            pg_off; /* Page Offset */

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT    msecs_to_jiffies(10000)

enum rsxx_dma_status {

        u8      sub_page;       /* Bit[0:2]: 512byte offset */
                                /* Bit[4:6]: 512byte count */
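        /*
         * Example encoding (matches the packing in rsxx_issue_dmas() below):
         * a 1024 byte transfer starting 512 bytes into the hardware block
         * has off = 1 and cnt = 2, so sub_page = (2 << 4) | 1 = 0x21.
         */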
        HW_CMD_BLK_DISCARD      = 0x70,
        HW_CMD_BLK_WRITE        = 0x80,
        HW_CMD_BLK_READ         = 0xC0,
        HW_CMD_BLK_RECON_READ   = 0xE0,

        HW_STATUS_HARD_ERR      = 0x02,
        HW_STATUS_SOFT_ERR      = 0x04,
        HW_STATUS_FAULT         = 0x08,

static struct kmem_cache *rsxx_dma_pool;

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
                (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
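/*
 * The tracker list is allocated as a single block: the dma_tracker_list
 * header followed immediately by RSXX_MAX_OUTSTANDING_CMDS trackers, which
 * is why the struct below ends in a zero-length list[0] member and the
 * allocation size is the DMA_TRACKER_LIST_SIZE8 macro above.
 */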
struct dma_tracker_list {
        struct dma_tracker      list[0];

/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
        unsigned long long tgt_addr8;

        tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
                      card->_stripe.upper_mask) |
                    ((addr8) & card->_stripe.lower_mask);
        do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
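/*
 * rsxx_addr8_to_laddr() above folds the striping bits out of a byte address
 * and converts the remainder into a per-target logical block address, while
 * rsxx_get_dma_tgt() below extracts those same bits to pick which DMA
 * channel (target) owns the address.
 */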
static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
        tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);

static unsigned int get_dma_size(struct rsxx_dma *dma)
        if (dma->sub_page.cnt)
                return dma->sub_page.cnt << 9;

        return RSXX_HW_BLK_SIZE;

/*----------------- DMA Tracker -------------------*/
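/*
 * Tracker tags form a free list protected by trackers->lock: pop_tracker()
 * hands out the tag at the head, set_tracker_dma() ties an in-flight DMA to
 * that tag so rsxx_dma_done() can find it again, and push_tracker() returns
 * the tag to the head once the DMA has completed.
 */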
static void set_tracker_dma(struct dma_tracker_list *trackers,
                            struct rsxx_dma *dma)
        trackers->list[tag].dma = dma;

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
        return trackers->list[tag].dma;

static int pop_tracker(struct dma_tracker_list *trackers)
        spin_lock(&trackers->lock);
        tag = trackers->head;
        trackers->head = trackers->list[tag].next_tag;
        trackers->list[tag].next_tag = -1;
        spin_unlock(&trackers->lock);

static void push_tracker(struct dma_tracker_list *trackers, int tag)
        spin_lock(&trackers->lock);
        trackers->list[tag].next_tag = trackers->head;
        trackers->head = tag;
        trackers->list[tag].dma = NULL;
        spin_unlock(&trackers->lock);

/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *      Interrupt Timer (64ns units) [15:0]
 *      Interrupt Count [24:16]
 */

#define INTR_COAL_LATENCY_MASK          (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT           16
#define INTR_COAL_COUNT_BITS            9
#define INTR_COAL_COUNT_MASK            (((1 << INTR_COAL_COUNT_BITS) - 1) << \
                                                INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS      64
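/*
 * Example encoding: count = 16 and latency = 1000ns gives
 * latency_units = 1000 / 64 = 15, so dma_intr_coal_val() below returns
 * (16 << 16) | 15 = 0x0010000f.
 */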
static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
        u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

        if (mode == RSXX_INTR_COAL_DISABLED)

        return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
               (latency_units & INTR_COAL_LATENCY_MASK);
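/*
 * dma_intr_coal_auto_tune() sums the hardware queue depth across all
 * targets and, when the card is configured for auto-tune mode, derives a
 * fresh coalescing value from it and writes it to the INTR_COAL register.
 */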
static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
        if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
            unlikely(card->eeh_state))

        for (i = 0; i < card->n_targets; i++)
                q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma,
        if (status & DMA_SW_ERR)
                ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
                ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;

        pci_unmap_page(ctrl->card->dev, dma->dma_addr,
                       dma->cmd == HW_CMD_BLK_WRITE ?

        dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

        kmem_cache_free(rsxx_dma_pool, dma);
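/*
 * rsxx_cleanup_dma_queue() drains a software queue, completing every DMA on
 * it with DMA_CANCELLED and returning how many were freed.
 */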
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
        struct rsxx_dma *dma;
        struct rsxx_dma *tmp;

        list_for_each_entry_safe(dma, tmp, q, list) {
                list_del(&dma->list);
                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                             struct rsxx_dma *dma)
        /*
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
        spin_lock_bh(&ctrl->queue_lock);
        ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
        spin_unlock_bh(&ctrl->queue_lock);
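/*
 * rsxx_handle_dma_error() classifies a hardware completion status.  CRC and
 * hard errors on a normal read are retried as a reconstruction read when
 * scrub_hard is enabled, otherwise the read is failed; reconstruction
 * reads, writes, and discards that error are failed outright.  Depending on
 * the outcome, the DMA is either requeued or completed with an error status.
 */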
static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
        unsigned int status = 0;

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
                dma->cmd, dma->laddr, hw_st);

        if (hw_st & HW_STATUS_CRC)
                ctrl->stats.crc_errors++;
        if (hw_st & HW_STATUS_HARD_ERR)
                ctrl->stats.hard_errors++;
        if (hw_st & HW_STATUS_SOFT_ERR)
                ctrl->stats.soft_errors++;

        case HW_CMD_BLK_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        if (ctrl->card->scrub_hard) {
                                dma->cmd = HW_CMD_BLK_RECON_READ;
                                ctrl->stats.reads_retried++;

                                status |= DMA_HW_FAULT;
                                ctrl->stats.reads_failed++;

                } else if (hw_st & HW_STATUS_FAULT) {
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;

        case HW_CMD_BLK_RECON_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        /* Data could not be reconstructed. */
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;

        case HW_CMD_BLK_WRITE:
                status |= DMA_HW_FAULT;
                ctrl->stats.writes_failed++;

        case HW_CMD_BLK_DISCARD:
                status |= DMA_HW_FAULT;
                ctrl->stats.discards_failed++;

                dev_err(CARD_TO_DEV(ctrl->card),
                        "Unknown command in DMA!(cmd: x%02x "
                        "laddr x%08x st: x%02x)\n",
                        dma->cmd, dma->laddr, hw_st);
                status |= DMA_SW_ERR;

        rsxx_requeue_dma(ctrl, dma);
        rsxx_complete_dma(ctrl, dma, status);
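/*
 * dma_engine_stalled() is the activity timer callback.  If the hardware
 * queue is still non-empty when the timer fires, it first checks whether
 * the last SW_CMD_IDX write was simply lost (and re-issues it); otherwise
 * it declares the channel stalled, faults the interface, and cancels every
 * queued and issued DMA on the channel.
 */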
static void dma_engine_stalled(unsigned long data)
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))

        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
                /*
                 * The dma engine was stalled because the SW_CMD_IDX write
                 * was lost. Issue it again to recover.
                 */
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "SW_CMD_IDX write was lost, re-writing...\n");
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                dev_warn(CARD_TO_DEV(ctrl->card),
                         "DMA channel %d has stalled, faulting interface.\n",
                ctrl->card->dma_fault = 1;

                /* Clean up the DMA queue */
                spin_lock(&ctrl->queue_lock);
                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
                spin_unlock(&ctrl->queue_lock);

                cnt += rsxx_dma_cancel(ctrl);

                dev_info(CARD_TO_DEV(ctrl->card),
                         "Freed %d queued DMAs on channel %d\n",
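/*
 * rsxx_issue_dmas() moves DMAs from the software queue into the hardware
 * command buffer: it pops a tracker tag for each DMA, fills in the command,
 * tag, sub_page, device, and host addresses, advances cmd.idx, and finally
 * writes SW_CMD_IDX so the hardware sees the newly queued commands.
 */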
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
        struct rsxx_dma *dma;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;

        hw_cmd_buf = ctrl->cmd.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->eeh_state))

        spin_lock_bh(&ctrl->queue_lock);
        if (list_empty(&ctrl->queue)) {
                spin_unlock_bh(&ctrl->queue_lock);
        spin_unlock_bh(&ctrl->queue_lock);

        tag = pop_tracker(ctrl->trackers);

        spin_lock_bh(&ctrl->queue_lock);
        dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
        list_del(&dma->list);
        ctrl->stats.sw_q_depth--;
        spin_unlock_bh(&ctrl->queue_lock);

        /*
         * This will catch any DMAs that slipped in right before the
         * fault, but were queued after all the other DMAs were
         * cancelled.
         */
        if (unlikely(ctrl->card->dma_fault)) {
                push_tracker(ctrl->trackers, tag);
                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);

        set_tracker_dma(ctrl->trackers, tag, dma);
        hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
        hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
        hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
        hw_cmd_buf[ctrl->cmd.idx].sub_page =
                        ((dma->sub_page.cnt & 0x7) << 4) |
                         (dma->sub_page.off & 0x7);

        hw_cmd_buf[ctrl->cmd.idx].device_addr =
                        cpu_to_le32(dma->laddr);

        hw_cmd_buf[ctrl->cmd.idx].host_addr =
                        cpu_to_le64(dma->dma_addr);

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Issue DMA%d(laddr %d tag %d) to idx %d\n",
                ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

        ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;

        if (dma->cmd == HW_CMD_BLK_WRITE)
                ctrl->stats.writes_issued++;
        else if (dma->cmd == HW_CMD_BLK_DISCARD)
                ctrl->stats.discards_issued++;
        else
                ctrl->stats.reads_issued++;

        /* Let HW know we've queued commands. */
        atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
        mod_timer(&ctrl->activity_timer,
                  jiffies + DMA_ACTIVITY_TIMEOUT);

        if (unlikely(ctrl->card->eeh_state)) {
                del_timer_sync(&ctrl->activity_timer);

        iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
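/*
 * rsxx_dma_done() walks the hardware status buffer.  Each entry carries a
 * tag and a status; the tag is looked up in the trackers to find the DMA,
 * which is then either error-handled or completed, and the tag is pushed
 * back onto the free list.  Once the buffer is drained, the DMA interrupt
 * is re-enabled and the issue worker is kicked if more software queue
 * entries are waiting.
 */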
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
        struct rsxx_dma *dma;
        struct hw_status *hw_st_buf;

        hw_st_buf = ctrl->status.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->dma_fault) ||
            unlikely(ctrl->card->eeh_state))

        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        while (count == ctrl->e_cnt) {
                /*
                 * The read memory-barrier is necessary to keep aggressive
                 * processors/optimizers (such as the PPC Apple G5) from
                 * reordering the following status-buffer tag & status read
                 * *before* the count read on subsequent iterations of the
                 * loop.
                 */

                status = hw_st_buf[ctrl->status.idx].status;
                tag    = hw_st_buf[ctrl->status.idx].tag;

                dma = get_tracker_dma(ctrl->trackers, tag);
                        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
                        rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
                        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

                        dev_err(CARD_TO_DEV(ctrl->card),
                                "No tracker for tag %d "
                                tag, ctrl->status.idx, ctrl->id);

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
                        ctrl->id, dma->laddr, tag, status, count,

                atomic_dec(&ctrl->stats.hw_q_depth);

                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                        rsxx_handle_dma_error(ctrl, dma, status);
                        rsxx_complete_dma(ctrl, dma, 0);

                push_tracker(ctrl->trackers, tag);

                ctrl->status.idx = (ctrl->status.idx + 1) &

                count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        dma_intr_coal_auto_tune(ctrl->card);

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
                del_timer_sync(&ctrl->activity_timer);

        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
        rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

        spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
        spin_unlock_bh(&ctrl->queue_lock);
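/*
 * The issue and done work items run on separate ordered workqueues but
 * serialize against each other through ctrl->work_lock.
 */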
static void rsxx_schedule_issue(struct work_struct *work)
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_issue_dmas(ctrl);
        mutex_unlock(&ctrl->work_lock);

static void rsxx_schedule_done(struct work_struct *work)
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

        mutex_lock(&ctrl->work_lock);

        mutex_unlock(&ctrl->work_lock);
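/*
 * rsxx_queue_discard() and rsxx_queue_dma() build rsxx_dma descriptors from
 * the slab pool and append them to a per-target list; rsxx_queue_dma()
 * additionally maps the data page for DMA and records the 512-byte
 * sub-page offset and count within the hardware block.
 */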
static int rsxx_queue_discard(struct rsxx_cardinfo *card,
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);

        dma->cmd = HW_CMD_BLK_DISCARD;
        dma->sub_page.off = 0;
        dma->sub_page.cnt = 0;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

        list_add_tail(&dma->list, q);

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
                          unsigned int dma_off,
                          unsigned int dma_len,
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);

        dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
                                     dir ? PCI_DMA_TODEVICE :
        if (!dma->dma_addr) {
                kmem_cache_free(rsxx_dma_pool, dma);

        dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->sub_page.off = (dma_off >> 9);
        dma->sub_page.cnt = (dma_len >> 9);
        dma->pg_off = pg_off;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card),
                "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
                dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
                dma->sub_page.cnt, dma->page, dma->pg_off);

        list_add_tail(&dma->list, q);
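/*
 * rsxx_dma_queue_bio() carves a bio into hardware-block-sized pieces.  Each
 * piece is assigned to a target with rsxx_get_dma_tgt(), converted to a
 * logical address with rsxx_addr8_to_laddr(), and queued on that target's
 * local list.  Once the whole bio has been split, each non-empty list is
 * spliced onto its controller's software queue under queue_lock and the
 * controller's issue workqueue is kicked.  Discard bios skip the page
 * mapping and are queued one hardware block at a time.
 */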
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        struct list_head dma_list[RSXX_MAX_TARGETS];
        struct bio_vec *bvec;
        unsigned long long addr8;
        unsigned int dma_off;
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];

        addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&dma_list[i]);

        if (bio->bi_rw & REQ_DISCARD) {
                bv_len = bio->bi_size;

                        tgt = rsxx_get_dma_tgt(card, addr8);
                        laddr = rsxx_addr8_to_laddr(addr8, card);

                        st = rsxx_queue_discard(card, &dma_list[tgt], laddr,

                        addr8 += RSXX_HW_BLK_SIZE;
                        bv_len -= RSXX_HW_BLK_SIZE;

                bio_for_each_segment(bvec, bio, i) {
                        bv_len = bvec->bv_len;
                        bv_off = bvec->bv_offset;

                                tgt = rsxx_get_dma_tgt(card, addr8);
                                laddr = rsxx_addr8_to_laddr(addr8, card);
                                dma_off = addr8 & RSXX_HW_BLK_MASK;
                                dma_len = min(bv_len,
                                              RSXX_HW_BLK_SIZE - dma_off);

                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                    laddr, bvec->bv_page,
                                                    bv_off, cb, cb_data);

        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
                        spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
                        spin_unlock_bh(&card->ctrl[i].queue_lock);

                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);

        for (i = 0; i < card->n_targets; i++) {
                spin_lock_bh(&card->ctrl[i].queue_lock);
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
                spin_unlock_bh(&card->ctrl[i].queue_lock);

/*----------------- DMA Engine Initialization & Setup -------------------*/
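/*
 * rsxx_hw_buffers_init() allocates the consistent status and command
 * buffers, programs their bus addresses into the SB_ADD / CB_ADD registers,
 * and synchronizes the software status/command indexes with the counters
 * the hardware reports, rejecting anything beyond RSXX_MAX_OUTSTANDING_CMDS.
 */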
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
        ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
                                                &ctrl->status.dma_addr);
        ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
                                             &ctrl->cmd.dma_addr);
        if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)

        memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_HI);

        memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

        ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
        if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading status cnt x%x\n",

        iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
        iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

        ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
        if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",

        iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
        iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                              struct rsxx_dma_ctrl *ctrl)

        memset(&ctrl->stats, 0, sizeof(ctrl->stats));

        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);

        ctrl->trackers->head = 0;
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                ctrl->trackers->list[i].next_tag = i + 1;
                ctrl->trackers->list[i].dma = NULL;
        ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
        spin_lock_init(&ctrl->trackers->lock);

        spin_lock_init(&ctrl->queue_lock);
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);

        setup_timer(&ctrl->activity_timer, dma_engine_stalled,
                    (unsigned long)ctrl);

        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);

        ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);

        INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
        INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

        st = rsxx_hw_buffers_init(dev, ctrl);
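/*
 * Worked example of the stripe math below (assumed values): with
 * stripe_size8 = 4096 and n_targets = 4, lower_mask = 0xfff,
 * upper_mask = ~0xfff, upper_shift = ffs(4) - 1 = 2, target_mask = 0x3,
 * and target_shift = ffs(4096) - 1 = 12, so rsxx_get_dma_tgt() picks the
 * channel from address bits [13:12].
 */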
static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
                                 unsigned int stripe_size8)
        if (!is_power_of_2(stripe_size8)) {
                dev_err(CARD_TO_DEV(card),
                        "stripe_size is NOT a power of 2!\n");

        card->_stripe.lower_mask = stripe_size8 - 1;

        card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
        card->_stripe.upper_shift = ffs(card->n_targets) - 1;

        card->_stripe.target_mask = card->n_targets - 1;
        card->_stripe.target_shift = ffs(stripe_size8) - 1;

        dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
                card->_stripe.lower_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
                card->_stripe.upper_shift);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
                card->_stripe.upper_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
                card->_stripe.target_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
                card->_stripe.target_shift);

int rsxx_dma_configure(struct rsxx_cardinfo *card)
        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      card->config.data.intr_coal.count,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);

        return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
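/*
 * rsxx_dma_setup() splits the register map into one 4K window per DMA
 * channel, resets the hardware queues, initializes every rsxx_dma_ctrl,
 * applies the saved card configuration when it is valid, and only then
 * enables the per-channel DMA interrupts.  The failure path below tears
 * down whichever workqueues, trackers, and buffers were already created.
 */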
int rsxx_dma_setup(struct rsxx_cardinfo *card)
        dev_info(CARD_TO_DEV(card),
                 "Initializing %d DMA targets\n",

        /* Regmap is divided up into 4K chunks. One for each DMA channel */
        for (i = 0; i < card->n_targets; i++)
                card->ctrl[i].regmap = card->regmap + (i * 4096);

        /* Reset the DMA queues */
        rsxx_dma_queue_reset(card);

        /************* Setup DMA Control *************/
        for (i = 0; i < card->n_targets; i++) {
                st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
                        goto failed_dma_setup;

                card->ctrl[i].card = card;
                card->ctrl[i].id = i;

        card->scrub_hard = 1;

        if (card->config_valid)
                rsxx_dma_configure(card);

        /* Enable the interrupts after all setup has completed. */
        for (i = 0; i < card->n_targets; i++) {
                spin_lock_irqsave(&card->irq_lock, flags);
                rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
                spin_unlock_irqrestore(&card->irq_lock, flags);

        for (i = 0; i < card->n_targets; i++) {
                struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;

                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;

                vfree(ctrl->trackers);

                if (ctrl->status.buf)
                        pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                            ctrl->status.dma_addr);
                        pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                            ctrl->cmd.buf, ctrl->cmd.dma_addr);
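/*
 * rsxx_dma_cancel() sweeps every tracker tag on a channel and cancels any
 * DMA still outstanding in hardware, returning the number of DMAs freed.
 */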
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
        struct rsxx_dma *dma;

        /* Clean up issued DMAs */
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                dma = get_tracker_dma(ctrl->trackers, i);
                        atomic_dec(&ctrl->stats.hw_q_depth);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        push_tracker(ctrl->trackers, i);

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
        struct rsxx_dma_ctrl *ctrl;

        for (i = 0; i < card->n_targets; i++) {
                ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;

                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;

                if (timer_pending(&ctrl->activity_timer))
                        del_timer_sync(&ctrl->activity_timer);

                /* Clean up the DMA queue */
                spin_lock_bh(&ctrl->queue_lock);
                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
                spin_unlock_bh(&ctrl->queue_lock);

                rsxx_dma_cancel(ctrl);

                vfree(ctrl->trackers);

                pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                    ctrl->status.buf, ctrl->status.dma_addr);
                pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                    ctrl->cmd.buf, ctrl->cmd.dma_addr);
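/*
 * EEH support: when a PCI error is detected, rsxx_eeh_save_issued_dmas()
 * pulls every DMA still held by a tracker back onto the software queue,
 * adjusts the issued/queued statistics, and unmaps the pages so the device
 * can be reset.  rsxx_eeh_remap_dmas() re-maps those pages once the card
 * has recovered so the queued DMAs can be re-issued.
 */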
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
        struct rsxx_dma *dma;
        struct list_head *issued_dmas;

        issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&issued_dmas[i]);

                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
                        dma = get_tracker_dma(card->ctrl[i].trackers, j);

                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                card->ctrl[i].stats.writes_issued--;
                        else if (dma->cmd == HW_CMD_BLK_DISCARD)
                                card->ctrl[i].stats.discards_issued--;
                        else
                                card->ctrl[i].stats.reads_issued--;

                        list_add_tail(&dma->list, &issued_dmas[i]);
                        push_tracker(card->ctrl[i].trackers, j);

                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_splice(&issued_dmas[i], &card->ctrl[i].queue);

                atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
                card->ctrl[i].stats.sw_q_depth += cnt;
                card->ctrl[i].e_cnt = 0;

                list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                        pci_unmap_page(card->dev, dma->dma_addr,
                                       dma->cmd == HW_CMD_BLK_WRITE ?
                                       PCI_DMA_FROMDEVICE);

                spin_unlock_bh(&card->ctrl[i].queue_lock);

int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
        struct rsxx_dma *dma;

        for (i = 0; i < card->n_targets; i++) {
                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                        dma->dma_addr = pci_map_page(card->dev, dma->page,
                                        dma->pg_off, get_dma_size(dma),
                                        dma->cmd == HW_CMD_BLK_WRITE ?
                                        PCI_DMA_FROMDEVICE);
                        if (!dma->dma_addr) {
                                spin_unlock_bh(&card->ctrl[i].queue_lock);
                                kmem_cache_free(rsxx_dma_pool, dma);

                spin_unlock_bh(&card->ctrl[i].queue_lock);

int rsxx_dma_init(void)
        rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);

void rsxx_dma_cleanup(void)
        kmem_cache_destroy(rsxx_dma_pool);