1 /***************************************************************************
2 * Copyright (c) 2005-2009, Broadcom Corporation.
4 * Name: crystalhd_hw . c
7 * BCM70010 Linux driver HW layer.
9 **********************************************************************
10 * This file is part of the crystalhd device driver.
12 * This driver is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation, version 2 of the License.
16 * This driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
23 **********************************************************************/
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include "crystalhd_hw.h"
29 /* Functions internal to this file */
31 static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
33 bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
34 bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
38 static void crystalhd_start_dram(struct crystalhd_adp *adp)
40 bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
41 /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
42 ((15 / 5 - 1) << 7) | /* trp */
43 ((10 / 5 - 1) << 10) | /* trrd */
44 ((15 / 5 + 1) << 12) | /* twr */
45 ((2 + 1) << 16) | /* twtr */
46 ((70 / 5 - 2) << 19) | /* trfc */
49 bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
50 bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
51 bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
52 bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
53 bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
54 bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
55 bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
56 /* setting the refresh rate here */
57 bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
61 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
63 link_misc_perst_deco_ctrl rst_deco_cntrl;
64 link_misc_perst_clk_ctrl rst_clk_cntrl;
68 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
69 * delay to allow PLL to lock Clear alternate clock, stop clock bits
71 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
72 rst_clk_cntrl.pll_pwr_dn = 0;
73 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
74 msleep_interruptible(50);
76 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
77 rst_clk_cntrl.stop_core_clk = 0;
78 rst_clk_cntrl.sel_alt_clk = 0;
80 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
81 msleep_interruptible(50);
84 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
85 * Set internal bus arbiter timeout to 40us based on core clock speed
86 * (63MHz * 40us = 0x9D8)
88 crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
91 * Decoder clocks: MISC_PERST_DECODER_CTRL
92 * Enable clocks while 7412 reset is asserted, delay
93 * De-assert 7412 reset
95 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
96 rst_deco_cntrl.stop_bcm_7412_clk = 0;
97 rst_deco_cntrl.bcm7412_rst = 1;
98 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
99 msleep_interruptible(10);
101 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
102 rst_deco_cntrl.bcm7412_rst = 0;
103 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
104 msleep_interruptible(50);
106 /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
107 crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
109 /* Clear bit 29 of 0x404 */
110 temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
112 crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
114 /* 2.5V regulator must be set to 2.6 volts (+6%) */
115 /* FIXME: jarod: what's the point of this reg read? */
116 temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
117 crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
122 static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
124 link_misc_perst_deco_ctrl rst_deco_cntrl;
125 link_misc_perst_clk_ctrl rst_clk_cntrl;
129 * Decoder clocks: MISC_PERST_DECODER_CTRL
130 * Assert 7412 reset, delay
131 * Assert 7412 stop clock
133 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
134 rst_deco_cntrl.stop_bcm_7412_clk = 1;
135 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
136 msleep_interruptible(50);
138 /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
139 * Set internal bus arbiter timeout to 40us based on core clock speed
140 * (6.75MHZ * 40us = 0x10E)
142 crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
144 /* Link clocks: MISC_PERST_CLOCK_CTRL
145 * Stop core clk, delay
146 * Set alternate clk, delay, set PLL power down
148 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
149 rst_clk_cntrl.stop_core_clk = 1;
150 rst_clk_cntrl.sel_alt_clk = 1;
151 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
152 msleep_interruptible(50);
154 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
155 rst_clk_cntrl.pll_pwr_dn = 1;
156 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
159 * Read and restore the Transaction Configuration Register
162 temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
165 * Link core soft reset: MISC3_RESET_CTRL
166 * - Write BIT[0]=1 and read it back for core reset to take place
168 crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
169 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
170 msleep_interruptible(50);
172 /* restore the transaction configuration register */
173 crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
178 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
180 intr_mask_reg intr_mask;
181 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
182 intr_mask.mask_pcie_err = 1;
183 intr_mask.mask_pcie_rbusmast_err = 1;
184 intr_mask.mask_pcie_rgr_bridge = 1;
185 intr_mask.mask_rx_done = 1;
186 intr_mask.mask_rx_err = 1;
187 intr_mask.mask_tx_done = 1;
188 intr_mask.mask_tx_err = 1;
189 crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
194 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
196 intr_mask_reg intr_mask;
197 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
198 intr_mask.mask_pcie_err = 1;
199 intr_mask.mask_pcie_rbusmast_err = 1;
200 intr_mask.mask_pcie_rgr_bridge = 1;
201 intr_mask.mask_rx_done = 1;
202 intr_mask.mask_rx_err = 1;
203 intr_mask.mask_tx_done = 1;
204 intr_mask.mask_tx_err = 1;
205 crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
210 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
214 /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
215 reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
217 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
219 reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
221 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
223 reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
225 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
228 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
230 uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
233 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
235 /* Write End Of Interrupt for PCIE */
236 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
240 static void crystalhd_soft_rst(struct crystalhd_adp *adp)
244 /* Assert c011 soft reset*/
245 bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
246 msleep_interruptible(50);
248 /* Release c011 soft reset*/
249 bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
251 /* Disable Stuffing..*/
252 val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
254 crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
257 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
261 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
263 crystalhd_reg_wr(adp, AES_CMD, 0);
264 crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
265 crystalhd_reg_wr(adp, AES_CMD, 0x1);
267 /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
268 for (i = 0; i < 100; ++i) {
269 reg = crystalhd_reg_rd(adp, AES_STATUS);
272 msleep_interruptible(10);
279 static bool crystalhd_start_device(struct crystalhd_adp *adp)
281 uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
283 BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
285 reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
286 reg_pwrmgmt &= ~ASPM_L1_ENABLE;
288 crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
290 if (!crystalhd_bring_out_of_rst(adp)) {
291 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
295 crystalhd_disable_interrupts(adp);
297 crystalhd_clear_errors(adp);
299 crystalhd_clear_interrupts(adp);
301 crystalhd_enable_interrupts(adp);
303 /* Enable the option for getting the total no. of DWORDS
304 * that have been transfered by the RXDMA engine
306 dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
308 crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
310 /* Enable PCI Global Control options */
311 glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
314 crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
316 crystalhd_enable_interrupts(adp);
318 crystalhd_soft_rst(adp);
319 crystalhd_start_dram(adp);
320 crystalhd_enable_uarts(adp);
325 static bool crystalhd_stop_device(struct crystalhd_adp *adp)
329 BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
330 /* Clear and disable interrupts */
331 crystalhd_disable_interrupts(adp);
332 crystalhd_clear_errors(adp);
333 crystalhd_clear_interrupts(adp);
335 if (!crystalhd_put_in_reset(adp))
336 BCMLOG_ERR("Failed to Put Link To Reset State\n");
338 reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
339 reg |= ASPM_L1_ENABLE;
340 crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
342 /* Set PCI Clk Req */
343 reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
344 reg |= PCI_CLK_REQ_ENABLE;
345 crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
350 static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
352 unsigned long flags = 0;
353 crystalhd_rx_dma_pkt *temp = NULL;
358 spin_lock_irqsave(&hw->lock, flags);
359 temp = hw->rx_pkt_pool_head;
361 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
362 temp->dio_req = NULL;
366 spin_unlock_irqrestore(&hw->lock, flags);
371 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
372 crystalhd_rx_dma_pkt *pkt)
374 unsigned long flags = 0;
379 spin_lock_irqsave(&hw->lock, flags);
380 pkt->next = hw->rx_pkt_pool_head;
381 hw->rx_pkt_pool_head = pkt;
382 spin_unlock_irqrestore(&hw->lock, flags);
/*
 * Call back from TX - IOQ deletion.
 *
 * This routine will release the TX DMA rings allocated
 * druing setup_dma rings interface.
 *
 * Memory is allocated per DMA ring basis. This is just
 * a place holder to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
	/* Intentionally empty: ring memory is freed at DMA-ring teardown. */
}
399 * Rx Packet release callback..
401 * Release All user mapped capture buffers and Our DMA packets
402 * back to our free pool. The actual cleanup of the DMA
403 * ring descriptors happen during dma ring release.
405 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
407 struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
408 crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data;
411 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
416 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
418 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
420 crystalhd_hw_free_rx_pkt(hw, pkt);
/* Delete a dio queue if it exists and NULL the pointer. Wrapped in
 * do/while(0) so it expands safely inside unbraced if/else bodies. */
#define crystalhd_hw_delete_ioq(adp, q)			\
	do {						\
		if (q) {				\
			crystalhd_delete_dioq(adp, q);	\
			q = NULL;			\
		}					\
	} while (0)
429 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
434 BCMLOG(BCMLOG_DBG, "Deleting IOQs \n");
435 crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
436 crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
437 crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
438 crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
439 crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
/* Create one dio queue; on failure jump to the caller's cleanup label.
 * Only usable inside a function that defines hw_create_ioq_err. */
#define crystalhd_hw_create_ioq(sts, hw, q, cb)			\
	do {							\
		sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \
		if (sts != BC_STS_SUCCESS)			\
			goto hw_create_ioq_err;			\
	} while (0)
453 * RX - Active, Ready and Free.
455 static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
457 BC_STATUS sts = BC_STS_SUCCESS;
460 BCMLOG_ERR("Invalid Arg!!\n");
461 return BC_STS_INV_ARG;
464 crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
465 crystalhd_tx_desc_rel_call_back);
466 crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
467 crystalhd_tx_desc_rel_call_back);
469 crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
470 crystalhd_rx_pkt_rel_call_back);
471 crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
472 crystalhd_rx_pkt_rel_call_back);
473 crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
474 crystalhd_rx_pkt_rel_call_back);
479 crystalhd_hw_delete_ioqs(hw);
485 static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
486 bool b_188_byte_pkts, uint8_t flags)
488 uint32_t base, end, writep, readp;
489 uint32_t cpbSize, cpbFullness, fifoSize;
491 if (flags & 0x02) { /* ASF Bit is set */
492 base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
493 end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
494 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
495 readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
496 } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
497 base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
498 end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
499 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
500 readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
502 base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
503 end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
504 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
505 readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
508 cpbSize = end - base;
510 cpbFullness = writep - readp;
512 cpbFullness = (end - base) - (readp - writep);
514 fifoSize = cpbSize - cpbFullness;
516 if (fifoSize < BC_INFIFO_THRESHOLD)
519 if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
525 static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
526 uint32_t list_id, BC_STATUS cs)
530 if (!hw || !list_id) {
531 BCMLOG_ERR("Invalid Arg..\n");
532 return BC_STS_INV_ARG;
537 tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
539 if (cs != BC_STS_IO_USER_ABORT)
540 BCMLOG_ERR("Find and Fetch Did not find req\n");
541 return BC_STS_NO_DATA;
544 if (tx_req->call_back) {
545 tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
546 tx_req->dio_req = NULL;
547 tx_req->cb_event = NULL;
548 tx_req->call_back = NULL;
550 BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
554 /* Now put back the tx_list back in FreeQ */
555 tx_req->list_tag = 0;
557 return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
560 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
562 uint32_t err_mask, tmp;
563 unsigned long flags = 0;
565 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
566 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
567 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
569 if (!(err_sts & err_mask))
572 BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts);
576 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
577 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
580 spin_lock_irqsave(&hw->lock, flags);
581 /* reset list index.*/
582 hw->tx_list_post_index = 0;
583 spin_unlock_irqrestore(&hw->lock, flags);
586 tmp = err_sts & err_mask;
587 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
592 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
594 uint32_t err_mask, tmp;
595 unsigned long flags = 0;
597 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
598 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
599 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
601 if (!(err_sts & err_mask))
604 BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts);
608 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
609 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
612 spin_lock_irqsave(&hw->lock, flags);
613 /* reset list index.*/
614 hw->tx_list_post_index = 0;
615 spin_unlock_irqrestore(&hw->lock, flags);
618 tmp = err_sts & err_mask;
619 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
624 static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
628 if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
629 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
632 if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
633 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
636 if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
637 INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
638 /* No error mask set.. */
642 /* Handle Tx errors. */
643 err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
645 if (crystalhd_tx_list0_handler(hw, err_sts))
646 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
649 if (crystalhd_tx_list1_handler(hw, err_sts))
650 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
653 hw->stats.tx_errors++;
656 static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
657 uint32_t ul_desc_index, uint32_t cnt)
661 if (!p_dma_desc || !cnt)
664 /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
665 * setting ll (log level, I presume) to non-zero? */
669 for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
670 BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
671 ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
673 p_dma_desc[ul_desc_index].buff_addr_high,
674 p_dma_desc[ul_desc_index].buff_addr_low,
675 p_dma_desc[ul_desc_index].next_desc_addr_high,
676 p_dma_desc[ul_desc_index].next_desc_addr_low,
677 p_dma_desc[ul_desc_index].xfer_size,
678 p_dma_desc[ul_desc_index].intr_enable,
679 p_dma_desc[ul_desc_index].last_rec_indicator);
684 static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
685 dma_descriptor *desc,
686 dma_addr_t desc_paddr_base,
687 uint32_t sg_cnt, uint32_t sg_st_ix,
688 uint32_t sg_st_off, uint32_t xfr_sz)
690 uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
691 dma_addr_t desc_phy_addr = desc_paddr_base;
694 if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
695 (!sg_cnt && !ioreq->uinfo.dir_tx)) {
696 BCMLOG_ERR("Invalid Args\n");
697 return BC_STS_INV_ARG;
700 for (ix = 0; ix < sg_cnt; ix++) {
702 /* Setup SGLE index. */
703 sg_ix = ix + sg_st_ix;
705 /* Get SGLE length */
706 len = crystalhd_get_sgle_len(ioreq, sg_ix);
708 BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
709 return BC_STS_NOT_IMPL;
711 /* Setup DMA desc with Phy addr & Length at current index. */
712 addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
713 if (sg_ix == sg_st_ix) {
714 addr_temp.full_addr += sg_st_off;
717 memset(&desc[ix], 0, sizeof(desc[ix]));
718 desc[ix].buff_addr_low = addr_temp.low_part;
719 desc[ix].buff_addr_high = addr_temp.high_part;
720 desc[ix].dma_dir = ioreq->uinfo.dir_tx;
722 /* Chain DMA descriptor. */
723 addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor);
724 desc[ix].next_desc_addr_low = addr_temp.low_part;
725 desc[ix].next_desc_addr_high = addr_temp.high_part;
727 if ((count + len) > xfr_sz)
728 len = xfr_sz - count;
731 if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
732 BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
733 len, ix, count, xfr_sz, sg_cnt);
736 /* Length expects Multiple of 4 */
737 desc[ix].xfer_size = (len / 4);
739 crystalhd_hw_dump_desc(desc, ix, 1);
742 desc_phy_addr += sizeof(dma_descriptor);
745 last_desc_ix = ix - 1;
747 if (ioreq->fb_size) {
748 memset(&desc[ix], 0, sizeof(desc[ix]));
749 addr_temp.full_addr = ioreq->fb_pa;
750 desc[ix].buff_addr_low = addr_temp.low_part;
751 desc[ix].buff_addr_high = addr_temp.high_part;
752 desc[ix].dma_dir = ioreq->uinfo.dir_tx;
753 desc[ix].xfer_size = 1;
754 desc[ix].fill_bytes = 4 - ioreq->fb_size;
755 count += ioreq->fb_size;
759 /* setup last descriptor..*/
760 desc[last_desc_ix].last_rec_indicator = 1;
761 desc[last_desc_ix].next_desc_addr_low = 0;
762 desc[last_desc_ix].next_desc_addr_high = 0;
763 desc[last_desc_ix].intr_enable = 1;
765 crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
767 if (count != xfr_sz) {
768 BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz);
772 return BC_STS_SUCCESS;
775 static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
776 pdma_desc_mem pdesc_mem,
777 uint32_t *uv_desc_index)
779 dma_descriptor *desc = NULL;
780 dma_addr_t desc_paddr_base = 0;
781 uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
783 BC_STATUS sts = BC_STS_SUCCESS;
786 if (!ioreq || !pdesc_mem || !uv_desc_index) {
787 BCMLOG_ERR("Invalid Args\n");
788 return BC_STS_INV_ARG;
791 if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
792 !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
793 BCMLOG_ERR("Invalid Args\n");
794 return BC_STS_INV_ARG;
797 if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
798 BCMLOG_ERR("UV offset for TX??\n");
799 return BC_STS_INV_ARG;
803 desc = pdesc_mem->pdma_desc_start;
804 desc_paddr_base = pdesc_mem->phy_addr;
806 if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
807 sg_cnt = ioreq->sg_cnt;
808 xfr_sz = ioreq->uinfo.xfr_len;
810 sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
811 xfr_sz = ioreq->uinfo.uv_offset;
814 sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
815 sg_st_ix, sg_st_off, xfr_sz);
817 if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
820 /* Prepare for UV mapping.. */
821 desc = &pdesc_mem->pdma_desc_start[sg_cnt];
822 desc_paddr_base = pdesc_mem->phy_addr +
823 (sg_cnt * sizeof(dma_descriptor));
825 /* Done with desc addr.. now update sg stuff.*/
826 sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
827 xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
828 sg_st_ix = ioreq->uinfo.uv_sg_ix;
829 sg_st_off = ioreq->uinfo.uv_sg_off;
831 sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
832 sg_st_ix, sg_st_off, xfr_sz);
833 if (sts != BC_STS_SUCCESS)
836 *uv_desc_index = sg_st_ix;
841 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
845 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
846 if (!(dma_cntrl & DMA_START_BIT)) {
847 dma_cntrl |= DMA_START_BIT;
848 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
857 * Verify if the Stop generates a completion interrupt or not.
858 * if it does not generate an interrupt, then add polling here.
860 static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
862 uint32_t dma_cntrl, cnt = 30;
863 uint32_t l1 = 1, l2 = 1;
864 unsigned long flags = 0;
866 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
868 BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
870 /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */
871 if (!dma_cntrl & DMA_START_BIT) {
872 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
873 return BC_STS_SUCCESS;
876 crystalhd_disable_interrupts(hw->adp);
878 /* Issue stop to HW */
879 /* This bit when set gave problems. Please check*/
880 dma_cntrl &= ~DMA_START_BIT;
881 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
883 BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
885 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
886 while ((l1 || l2) && cnt) {
889 l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
894 l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
898 msleep_interruptible(100);
904 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
905 crystalhd_enable_interrupts(hw->adp);
909 spin_lock_irqsave(&hw->lock, flags);
910 hw->tx_list_post_index = 0;
911 spin_unlock_irqrestore(&hw->lock, flags);
912 BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
913 crystalhd_enable_interrupts(hw->adp);
915 return BC_STS_SUCCESS;
918 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
921 * Position of the PIB Entries can be found at
922 * 0th and the 1st location of the Circular list.
925 uint32_t pib_cnt, r_offset, w_offset;
927 Q_addr = hw->pib_del_Q_addr;
929 /* Get the Read Pointer */
930 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
932 /* Get the Write Pointer */
933 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
935 if (r_offset == w_offset)
936 return 0; /* Queue is empty */
938 if (w_offset > r_offset)
939 pib_cnt = w_offset - r_offset;
941 pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
942 (r_offset + MIN_PIB_Q_DEPTH);
944 if (pib_cnt > MAX_PIB_Q_DEPTH) {
945 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
952 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
955 uint32_t addr_entry, r_offset, w_offset;
957 Q_addr = hw->pib_del_Q_addr;
959 /* Get the Read Pointer 0Th Location is Read Pointer */
960 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
962 /* Get the Write Pointer 1st Location is Write pointer */
963 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
966 if (r_offset == w_offset)
969 if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
972 /* Get the Actual Address of the PIB */
973 crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
976 /* Increment the Read Pointer */
979 if (MAX_PIB_Q_DEPTH == r_offset)
980 r_offset = MIN_PIB_Q_DEPTH;
982 /* Write back the read pointer to It's Location */
983 crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
988 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
991 uint32_t r_offset, w_offset, n_offset;
993 Q_addr = hw->pib_rel_Q_addr;
995 /* Get the Read Pointer */
996 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
998 /* Get the Write Pointer */
999 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1001 if ((r_offset < MIN_PIB_Q_DEPTH) ||
1002 (r_offset >= MAX_PIB_Q_DEPTH))
1005 n_offset = w_offset + 1;
1007 if (MAX_PIB_Q_DEPTH == n_offset)
1008 n_offset = MIN_PIB_Q_DEPTH;
1010 if (r_offset == n_offset)
1011 return false; /* should never happen */
1013 /* Write the DRAM ADDR to the Queue at Next Offset */
1014 crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1017 /* Put the New value of the write pointer in Queue */
1018 crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1023 static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
1025 if (!src_pib || !dst_pib) {
1026 BCMLOG_ERR("Invalid Arguments\n");
1030 dst_pib->timeStamp = 0;
1031 dst_pib->picture_number = src_pib->ppb.picture_number;
1032 dst_pib->width = src_pib->ppb.width;
1033 dst_pib->height = src_pib->ppb.height;
1034 dst_pib->chroma_format = src_pib->ppb.chroma_format;
1035 dst_pib->pulldown = src_pib->ppb.pulldown;
1036 dst_pib->flags = src_pib->ppb.flags;
1037 dst_pib->sess_num = src_pib->ptsStcOffset;
1038 dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
1039 dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
1040 dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1041 dst_pib->frame_rate = src_pib->resolution ;
1045 static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1049 uint32_t pib_addr, pib_cnt;
1050 BC_PIC_INFO_BLOCK *AppPib;
1051 crystalhd_rx_dma_pkt *rx_pkt = NULL;
1053 pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1058 for (cnt = 0; cnt < pib_cnt; cnt++) {
1060 pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1061 crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4,
1062 (uint32_t *)&src_pib);
1064 if (src_pib.bFormatChange) {
1065 rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
1069 rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
1070 AppPib = &rx_pkt->pib;
1071 cpy_pib_to_app(&src_pib, AppPib);
1074 "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1075 rx_pkt->pib.picture_number,
1076 rx_pkt->pib.aspect_ratio,
1077 rx_pkt->pib.chroma_format,
1078 rx_pkt->pib.colour_primaries,
1079 rx_pkt->pib.frame_rate,
1083 rx_pkt->pib.pulldown,
1086 crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
1090 crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1094 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1098 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1099 if (!(dma_cntrl & DMA_START_BIT)) {
1100 dma_cntrl |= DMA_START_BIT;
1101 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1104 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1105 if (!(dma_cntrl & DMA_START_BIT)) {
1106 dma_cntrl |= DMA_START_BIT;
1107 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1113 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1115 uint32_t dma_cntrl = 0, count = 30;
1116 uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1118 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1119 if ((dma_cntrl & DMA_START_BIT)) {
1120 dma_cntrl &= ~DMA_START_BIT;
1121 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1124 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1125 if ((dma_cntrl & DMA_START_BIT)) {
1126 dma_cntrl &= ~DMA_START_BIT;
1127 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1130 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
1131 while ((l0y || l0uv || l1y || l1uv) && count) {
1134 l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1135 l0y &= DMA_START_BIT;
1137 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1142 l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1143 l1y &= DMA_START_BIT;
1145 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1150 l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1151 l0uv &= DMA_START_BIT;
1153 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1158 l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1159 l1uv &= DMA_START_BIT;
1161 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1164 msleep_interruptible(100);
1168 hw->rx_list_post_index = 0;
1170 BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1171 count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
/*
 * crystalhd_hw_prog_rxdma() - arm one RX capture list with @rx_pkt's DMA
 * descriptors.  Under rx_lock it claims the list slot at rx_list_post_index
 * (slot must be sts_free), tags the packet with rx_pkt_tag_seed + slot,
 * marks the slot waiting for the Y (and, if uv_phy_addr is set, UV)
 * completion interrupt, and advances the post index modulo DMA_ENGINE_CNT.
 * Outside the lock it queues the packet on rx_actq, starts the RX engine
 * and writes the descriptor base addresses into the LIST0/LIST1 register
 * pairs; bit 0 of the low address word is the hardware "valid" bit.
 *
 * NOTE(review): this extract has elided lines - the opening brace, the
 * busy-path return after the early unlock, the "else" selecting the LIST1
 * registers, and the declaration of desc_addr (presumably a 64-bit
 * low/high union) - confirm against the full source file.
 */
1174 static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt)
1176 uint32_t y_low_addr_reg, y_high_addr_reg;
1177 uint32_t uv_low_addr_reg, uv_high_addr_reg;
1179 unsigned long flags;
1181 if (!hw || !rx_pkt) {
1182 BCMLOG_ERR("Invalid Arguments\n");
1183 return BC_STS_INV_ARG;
1186 if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1187 BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1188 return BC_STS_INV_ARG;
1191 spin_lock_irqsave(&hw->rx_lock, flags);
1192 /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
1193 if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
/* Slot still in flight: unlock and bail (the busy return is elided here). */
1194 spin_unlock_irqrestore(&hw->rx_lock, flags);
1198 if (!hw->rx_list_post_index) {
1199 y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1200 y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1201 uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1202 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
/* else branch (elided "} else {"): use the LIST1 register set. */
1204 y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1205 y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1206 uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1207 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
1209 rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1210 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1211 if (rx_pkt->uv_phy_addr)
1212 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1213 hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1214 spin_unlock_irqrestore(&hw->rx_lock, flags);
1216 crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
1218 crystalhd_start_rx_dma_engine(hw);
1219 /* Program the Y descriptor */
1220 desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1221 crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
/* Low word write with bit 0 set = descriptor valid; this kicks the DMA. */
1222 crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1224 if (rx_pkt->uv_phy_addr) {
1225 /* Program the UV descriptor */
1226 desc_addr.full_addr = rx_pkt->uv_phy_addr;
1227 crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
1228 crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
1231 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_post_cap_buff() - try to program @rx_pkt into a DMA list;
 * if both lists are busy (BC_STS_BUSY) the packet is parked back on
 * rx_freeq, tagged, for a later retry.  (The final "return sts;" line is
 * elided from this extract.)
 */
1234 static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1235 crystalhd_rx_dma_pkt *rx_pkt)
1237 BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1239 if (sts == BC_STS_BUSY)
1240 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1241 false, rx_pkt->pkt_tag);
/*
 * crystalhd_get_dnsz() - read back the number of bytes the hardware
 * transferred for the Y and UV planes of the given list.
 * NOTE(review): the if/else choosing the register pair is elided in this
 * extract - presumably LIST0 registers for list_index 0, LIST1 otherwise.
 */
1246 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1247 uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1249 uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1252 y_dn_sz_reg = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1253 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1255 y_dn_sz_reg = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1256 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1259 *y_dw_dnsz = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1260 *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
/*
 * crystalhd_hw_finalize_pause() - complete a pending pause: clear the
 * DMA_START_BIT in both the Y and UV RX list control registers, reset the
 * list post index, and re-enable ASPM L1 on the PCIe link to save power.
 *
 * This must be called only after making sure both DMA lists are free; it
 * does NOT check whether DMAs are still active before turning them off.
 */
1268 static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1270 uint32_t dma_cntrl, aspm;
1272 hw->stop_pending = 0;
1274 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1275 if (dma_cntrl & DMA_START_BIT) {
1276 dma_cntrl &= ~DMA_START_BIT;
1277 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1280 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1281 if (dma_cntrl & DMA_START_BIT) {
1282 dma_cntrl &= ~DMA_START_BIT;
1283 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1285 hw->rx_list_post_index = 0;
/* With both engines stopped, allow the link back into ASPM L1. */
1287 aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1288 aspm |= ASPM_L1_ENABLE;
1289 /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1290 crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
/*
 * crystalhd_rx_pkt_done() - retire the RX packet owned by @list_index.
 * Fetches the packet from the active queue by tag; on success records the
 * Y/UV done sizes into the user info, flags the data valid and moves the
 * packet to the ready queue, then re-posts the DIO for another capture.
 *
 * NOTE(review): elided in this extract - the third parameter of the
 * signature (comp_sts, a BC_STATUS), the "if (!rx_pkt)" guard before the
 * Act-Q error log, and the non-success path between lines 1322 and 1326.
 * The local "sts" appears unused in the visible lines; the elided success
 * path presumably returns it.  Confirm against the full source.
 */
1293 static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
1296 crystalhd_rx_dma_pkt *rx_pkt = NULL;
1297 uint32_t y_dw_dnsz, uv_dw_dnsz;
1298 BC_STATUS sts = BC_STS_SUCCESS;
1300 if (!hw || list_index >= DMA_ENGINE_CNT) {
1301 BCMLOG_ERR("Invalid Arguments\n");
1302 return BC_STS_INV_ARG;
1305 rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1306 hw->rx_pkt_tag_seed + list_index);
1308 BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1309 hw->rx_list_post_index, hw->rx_list_sts[0],
1310 hw->rx_list_sts[1], list_index,
1311 hw->rx_pkt_tag_seed + list_index, comp_sts);
1312 return BC_STS_INV_ARG;
1315 if (comp_sts == BC_STS_SUCCESS) {
1316 crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1317 rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1318 rx_pkt->flags = COMP_FLAG_DATA_VALID;
1319 if (rx_pkt->uv_phy_addr)
1320 rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
1321 crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1322 hw->rx_pkt_tag_seed + list_index);
1326 /* Check if we can post this DIO again. */
1327 return crystalhd_hw_post_cap_buff(hw, rx_pkt);
/*
 * crystalhd_rx_list0_handler() - service DMA-done and error status for RX
 * list 0.  Clears the waiting-Y/UV flags on the respective DMA-done
 * interrupts; converts underrun errors into a cleared wait flag and
 * FIFO-full errors into rx_y_error/rx_uv_error list states; any leftover
 * (unrecognized) error bits also force the error state and reset the post
 * index.  Finally acks the handled bits in the MISC1 error registers.
 * Returns true when the list-0 state word changed (caller re-evaluates).
 *
 * NOTE(review): the local declarations (tmp, tmp_lsts), the early "return
 * false", and the "if (tmp) {" wrappers around the unknown-error fallbacks
 * are elided in this extract.
 */
1330 static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1331 uint32_t y_err_sts, uint32_t uv_err_sts)
1336 if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1339 tmp_lsts = hw->rx_list_sts[0];
/* ---- Y plane status ---- */
1342 tmp = y_err_sts & GET_Y0_ERR_MSK;
1343 if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1344 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1346 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1347 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1348 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1351 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1352 hw->rx_list_sts[0] &= ~rx_y_mask;
1353 hw->rx_list_sts[0] |= rx_y_error;
1354 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
/* Unknown residual Y error bits: mark list errored, restart posting at 0. */
1358 hw->rx_list_sts[0] &= ~rx_y_mask;
1359 hw->rx_list_sts[0] |= rx_y_error;
1360 hw->rx_list_post_index = 0;
/* ---- UV plane status ---- */
1364 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1365 if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1366 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1368 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1369 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1370 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1373 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1374 hw->rx_list_sts[0] &= ~rx_uv_mask;
1375 hw->rx_list_sts[0] |= rx_uv_error;
1376 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
/* Unknown residual UV error bits: same error fallback as the Y side. */
1380 hw->rx_list_sts[0] &= ~rx_uv_mask;
1381 hw->rx_list_sts[0] |= rx_uv_error;
1382 hw->rx_list_post_index = 0;
/* Ack (write-to-clear) the list-0 bits we observed. */
1385 if (y_err_sts & GET_Y0_ERR_MSK) {
1386 tmp = y_err_sts & GET_Y0_ERR_MSK;
1387 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1390 if (uv_err_sts & GET_UV0_ERR_MSK) {
1391 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1392 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1395 return (tmp_lsts != hw->rx_list_sts[0]);
/*
 * crystalhd_rx_list1_handler() - list-1 twin of crystalhd_rx_list0_handler():
 * identical DMA-done / underrun / FIFO-full / unknown-error state machine,
 * operating on rx_list_sts[1] with the L1 interrupt and error masks.
 * Returns true when the list-1 state word changed.
 *
 * NOTE(review): local declarations, the early "return false", and the
 * "if (tmp) {" wrappers are elided here, as in the list-0 handler.
 */
1398 static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1399 uint32_t y_err_sts, uint32_t uv_err_sts)
1404 if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1407 tmp_lsts = hw->rx_list_sts[1];
/* ---- Y plane status ---- */
1410 tmp = y_err_sts & GET_Y1_ERR_MSK;
1411 if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1412 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1414 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1415 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1416 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1419 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1420 /* Add retry-support..*/
1421 hw->rx_list_sts[1] &= ~rx_y_mask;
1422 hw->rx_list_sts[1] |= rx_y_error;
1423 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
/* Unknown residual Y error bits: mark list errored, restart posting at 0. */
1427 hw->rx_list_sts[1] &= ~rx_y_mask;
1428 hw->rx_list_sts[1] |= rx_y_error;
1429 hw->rx_list_post_index = 0;
/* ---- UV plane status ---- */
1433 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1434 if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK) {
1435 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1438 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1439 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1440 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1443 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1444 /* Add retry-support*/
1445 hw->rx_list_sts[1] &= ~rx_uv_mask;
1446 hw->rx_list_sts[1] |= rx_uv_error;
1447 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
/* Unknown residual UV error bits: same error fallback as the Y side. */
1451 hw->rx_list_sts[1] &= ~rx_uv_mask;
1452 hw->rx_list_sts[1] |= rx_uv_error;
1453 hw->rx_list_post_index = 0;
/* Ack (write-to-clear) the list-1 bits we observed. */
1456 if (y_err_sts & GET_Y1_ERR_MSK) {
1457 tmp = y_err_sts & GET_Y1_ERR_MSK;
1458 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1461 if (uv_err_sts & GET_UV1_ERR_MSK) {
1462 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1463 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1466 return (tmp_lsts != hw->rx_list_sts[1]);
/*
 * crystalhd_rx_isr() - RX interrupt service.  Bails unless @intr_sts has
 * RX bits set; snapshots the Y/UV error registers once, then for each DMA
 * engine: runs the per-list handler under rx_lock, inspects the resulting
 * list state, and (outside the lock) completes the packet with success or
 * error via crystalhd_rx_pkt_done().  Afterwards, a pending pause is
 * finalized when both lists are free; otherwise capture is re-armed.
 *
 * NOTE(review): elided in this extract - the "ret" declaration, the
 * list-index dispatch between the two handlers, the switch case labels
 * (success / error states / default), list_avail accounting, and the else
 * guarding the start-capture call.  Confirm against the full source.
 */
1470 static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1472 unsigned long flags;
1473 uint32_t i, list_avail = 0;
1474 BC_STATUS comp_sts = BC_STS_NO_DATA;
1475 uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1479 BCMLOG_ERR("Invalid Arguments\n");
1483 if (!(intr_sts & GET_RX_INTR_MASK))
/* Read the error registers once for both lists' handlers. */
1486 y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1487 uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1489 for (i = 0; i < DMA_ENGINE_CNT; i++) {
1490 /* Update States..*/
1491 spin_lock_irqsave(&hw->rx_lock, flags);
1493 ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1495 ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1497 switch (hw->rx_list_sts[i]) {
1499 comp_sts = BC_STS_SUCCESS;
1505 /* We got error on both or Y or uv. */
1506 hw->stats.rx_errors++;
1507 crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1508 /* FIXME: jarod: this is where my mini pci-e card is tripping up */
1509 BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
1510 "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1511 i, hw->stats.rx_errors, y_err_sts,
1512 uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
1513 hw->rx_list_sts[i] = sts_free;
1514 comp_sts = BC_STS_ERROR;
1517 /* Wait for completion..*/
1518 comp_sts = BC_STS_NO_DATA;
1522 spin_unlock_irqrestore(&hw->rx_lock, flags);
1524 /* handle completion...*/
1525 if (comp_sts != BC_STS_NO_DATA) {
1526 crystalhd_rx_pkt_done(hw, i, comp_sts);
1527 comp_sts = BC_STS_NO_DATA;
1532 if (hw->stop_pending) {
1533 if ((hw->rx_list_sts[0] == sts_free) &&
1534 (hw->rx_list_sts[1] == sts_free))
1535 crystalhd_hw_finalize_pause(hw);
/* No pause pending (elided else): keep the capture pipeline armed. */
1537 crystalhd_hw_start_capture(hw);
/*
 * crystalhd_fw_cmd_post_proc() - per-command fixups after a firmware
 * command completes.  DEC_CHAN_START_VIDEO: latch the picture-info
 * delivery/release queue addresses from the response.  eCMD_C011_INIT:
 * load the firmware configuration; failure maps to BC_STS_FW_AUTH_FAILED.
 * NOTE(review): the second signature parameter (the BC_FW_CMD *fw_cmd
 * dereferenced below), case break lines, the default case and the final
 * return are elided in this extract.
 */
1542 static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1545 BC_STATUS sts = BC_STS_SUCCESS;
1546 DecRspChannelStartVideo *st_rsp = NULL;
1548 switch (fw_cmd->cmd[0]) {
1549 case eCMD_C011_DEC_CHAN_START_VIDEO:
1550 st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp;
1551 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1552 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1553 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1554 hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1556 case eCMD_C011_INIT:
1557 if (!(crystalhd_load_firmware_config(hw->adp))) {
1558 BCMLOG_ERR("Invalid Params.\n");
1559 sts = BC_STS_FW_AUTH_FAILED;
/*
 * crystalhd_put_ddr2sleep() - power the decoder down for suspend: pulse
 * the 7412 reset, precharge/idle the DDR, drop the DDR CKE pin via bit 25
 * of SDRAM_PARAM, reset the audio DSP, and power down the Raptor, audio
 * and video/core PLLs.  Always returns BC_STS_SUCCESS.
 * NOTE(review): the "uint32_t reg;" declaration and each read-modify
 * "reg |= ..." line between the register reads and writes are elided in
 * this extract.
 */
1568 static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1571 link_misc_perst_decoder_ctrl rst_cntrl_reg;
1573 /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1574 rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
1576 rst_cntrl_reg.bcm_7412_rst = 1;
1577 crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1578 msleep_interruptible(50);
1580 rst_cntrl_reg.bcm_7412_rst = 0;
1581 crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1583 /* Close all banks, put DDR in idle */
1584 bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1586 /* Set bit 25 (drop CKE pin of DDR) */
1587 reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1589 bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1591 /* Reset the audio block */
1592 bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1594 /* Power down Raptor PLL */
1595 reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1597 bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1599 /* Power down all Audio PLL */
1600 bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1602 /* Power down video clock (75MHz) */
1603 reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1605 bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1607 /* Power down video clock (75MHz) */
1608 reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1610 bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1612 /* Power down core clock (200MHz) */
1613 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1615 bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1617 /* Power down core clock (200MHz) */
1618 reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1620 bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1622 return BC_STS_SUCCESS;
1625 /************************************************
1627 *************************************************/
/*
 * crystalhd_download_fw() - push a firmware image into device DRAM through
 * the DCI port and authenticate its 36-byte trailing signature.
 * Sequence: verify OTP is programmed, reset + enable the DCI downloader,
 * poll DCI_STATUS for the RDY bit, stream the image (minus signature) a
 * word at a time to DCI_FIRMWARE_DATA, write the byte-swapped signature
 * words into DCI_SIGNATURE_DATA_7.., start verification, then poll for
 * the signature-match bit (BC_BIT(9)) and wait for download-done.
 *
 * NOTE(review): elided in this extract - the timeout counter declarations
 * and their decrements guarding the two poll loops, the dram_offset and
 * temp_buff advances inside the copy loop, and the sig_reg stepping in
 * the signature loop.  Confirm against the full source before relying on
 * the loop termination behavior shown here.
 */
1629 BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
1631 uint32_t reg_data, cnt, *temp_buff;
1632 uint32_t fw_sig_len = 36;
1633 uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1637 if (!adp || !buffer || !sz) {
1638 BCMLOG_ERR("Invalid Params.\n");
1639 return BC_STS_INV_ARG;
1642 reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1643 if (!(reg_data & 0x02)) {
1644 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1645 return BC_STS_ERROR;
/* Reset then enable the DCI firmware downloader. */
1649 crystalhd_reg_wr(adp, DCI_CMD, 0);
1650 reg_data |= BC_BIT(0);
1651 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1655 msleep_interruptible(10);
/* Wait for the downloader RDY bit (BC_BIT(4)) before streaming. */
1657 while (reg_data != BC_BIT(4)) {
1658 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1659 reg_data &= BC_BIT(4);
1661 BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1662 return BC_STS_TIMEOUT;
1666 msleep_interruptible(10);
1667 /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
1668 crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1669 temp_buff = (uint32_t *)buffer;
1670 for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1671 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1672 crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1676 msleep_interruptible(10);
/* Write the 8-word signature, byte-swapped, starting at SIGNATURE_DATA_7. */
1680 sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1681 for (cnt = 0; cnt < 8; cnt++) {
1682 uint32_t swapped_data = *temp_buff;
1683 swapped_data = bswap_32_1(swapped_data);
1684 crystalhd_reg_wr(adp, sig_reg, swapped_data);
1688 msleep_interruptible(10);
/* Kick signature verification (BC_BIT(1) of DCI_CMD). */
1691 reg_data |= BC_BIT(1);
1692 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1693 msleep_interruptible(10);
1696 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1698 if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
/* Signature matched: wait for download-done (BC_BIT(0)), then
 * release the processor via BC_BIT(4) of DCI_CMD. */
1700 while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1701 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1702 reg_data &= BC_BIT(0);
1705 msleep_interruptible(10);
1708 reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1709 reg_data |= BC_BIT(4);
1710 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
/* Elided else: signature mismatch path. */
1713 BCMLOG_ERR("F/w Signature mismatch\n");
1714 return BC_STS_FW_AUTH_FAILED;
1717 BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1718 return BC_STS_SUCCESS;; /* NOTE(review): stray second ';' - harmless empty statement, remove */
/*
 * crystalhd_do_fw_cmd() - issue one firmware command and collect the
 * response.  Writes the command block to TS_Host2CpuSnd (read back once
 * to flush the memory arbitrator), rings the Hst2CpuMbx1 mailbox, then
 * waits up to 20s for the firmware-completion event that the top-level
 * ISR signals.  On success, reads the response address from Cpu2HstMbx1,
 * copies FW_CMD_BUFF_SZ words of response, checks the C011 return code,
 * and runs per-command post-processing.
 *
 * NOTE(review): the declarations of rc and sts, the rc==0 test opening
 * the status mapping chain, the final else of that chain, and the final
 * "return sts;" are elided in this extract.  The on-stack wait queue
 * (fw_cmd_event) is published via hw->pfw_cmd_event for the ISR; it is
 * created before the argument checks - ordering worth revisiting.
 */
1721 BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
1723 uint32_t cnt = 0, cmd_res_addr;
1724 uint32_t *cmd_buff, *res_buff;
1725 wait_queue_head_t fw_cmd_event;
1729 crystalhd_create_event(&fw_cmd_event);
1733 if (!hw || !fw_cmd) {
1734 BCMLOG_ERR("Invalid Arguments\n");
1735 return BC_STS_INV_ARG;
1738 cmd_buff = fw_cmd->cmd;
1739 res_buff = fw_cmd->rsp;
1741 if (!cmd_buff || !res_buff) {
1742 BCMLOG_ERR("Invalid Parameters for F/W Command \n");
1743 return BC_STS_INV_ARG;
/* Publish the event the ISR will signal on firmware completion. */
1748 hw->fwcmd_evt_sts = 0;
1749 hw->pfw_cmd_event = &fw_cmd_event;
1751 /*Write the command to the memory*/
1752 crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1754 /*Memory Read for memory arbitrator flush*/
1755 crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1757 /* Write the command address to mailbox */
1758 bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1759 msleep_interruptible(50);
1761 crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
/* Map the wait result (rc) onto a BC_STATUS (rc==0 test elided). */
1764 sts = BC_STS_SUCCESS;
1765 } else if (rc == -EBUSY) {
1766 BCMLOG_ERR("Firmware command T/O\n");
1767 sts = BC_STS_TIMEOUT;
1768 } else if (rc == -EINTR) {
1769 BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1770 sts = BC_STS_IO_USER_ABORT;
1772 BCMLOG_ERR("FwCmd IO Error.\n");
1773 sts = BC_STS_IO_ERROR;
1776 if (sts != BC_STS_SUCCESS) {
1777 BCMLOG_ERR("FwCmd Failed.\n");
1782 /*Get the Responce Address*/
1783 cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1785 /*Read the Response*/
1786 crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
/* Word 2 of the response carries the C011 API return code. */
1790 if (res_buff[2] != C011_RET_SUCCESS) {
1791 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1792 return BC_STS_FW_CMD_ERR;
1795 sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1796 if (sts != BC_STS_SUCCESS)
1797 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
/*
 * crystalhd_hw_interrupt() - top-level interrupt handler.  Reads both the
 * decoder interrupt status (Stream2Host_Intr_Sts) and the PCIe bridge
 * status (INTR_INTR_STATUS).  Decoder interrupts: bit 31 signals firmware
 * command completion (wakes the waiter set up by crystalhd_do_fw_cmd());
 * BC_BIT(1) signals a picture information block.  Bridge interrupts are
 * dispatched to the RX and TX ISRs, then cleared and EOI'd.
 *
 * NOTE(review): the bool "rc" declaration, the "rc = true" marking that
 * an interrupt was handled, the guards around the RX/TX ISR dispatch, and
 * the final "return rc;" are elided in this extract.  0xdeaddead is
 * treated as a bogus readback value and ignored.
 */
1802 bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1804 uint32_t intr_sts = 0;
1805 uint32_t deco_intr = 0;
1808 if (!adp || !hw->dev_started)
1811 hw->stats.num_interrupts++;
1814 deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1815 intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1818 /* let system know we processed interrupt..*/
1820 hw->stats.dev_interrupts++;
1823 if (deco_intr && (deco_intr != 0xdeaddead)) {
1825 if (deco_intr & 0x80000000) {
1826 /*Set the Event and the status flag*/
1827 if (hw->pfw_cmd_event) {
1828 hw->fwcmd_evt_sts = 1;
1829 crystalhd_set_event(hw->pfw_cmd_event);
1833 if (deco_intr & BC_BIT(1))
1834 crystalhd_hw_proc_pib(hw);
1836 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1837 /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
1838 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1843 crystalhd_rx_isr(hw, intr_sts);
1846 crystalhd_tx_isr(hw, intr_sts);
1848 /* Clear interrupts */
1851 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1853 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
/*
 * crystalhd_hw_open() - one-time HW-layer bring-up.  Idempotent: returns
 * success immediately if the device is already started.  Zeroes the hw
 * context, initializes the locks and the TX/RX tag seeds, starts the
 * device and programs the initial core clock (CLOCK_PRESET).
 *
 * NOTE(review): elided in this extract - the argument-check condition
 * before the first error log, and (critically) the "hw->adp = adp;"
 * style assignment that must follow the memset, since later lines use
 * hw->adp.  Confirm against the full source.
 */
1861 BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
1864 BCMLOG_ERR("Invalid Arguments\n");
1865 return BC_STS_INV_ARG;
1868 if (hw->dev_started)
1869 return BC_STS_SUCCESS;
1871 memset(hw, 0, sizeof(struct crystalhd_hw));
1874 spin_lock_init(&hw->lock);
1875 spin_lock_init(&hw->rx_lock);
1876 /* FIXME: jarod: what are these magic numbers?!? */
1877 hw->tx_ioq_tag_seed = 0x70023070;
1878 hw->rx_pkt_tag_seed = 0x70029070;
1880 hw->stop_pending = 0;
1881 crystalhd_start_device(hw->adp);
1882 hw->dev_started = true;
1884 /* set initial core clock */
1885 hw->core_clock_mhz = CLOCK_PRESET;
1888 crystalhd_hw_set_core_clock(hw);
1890 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_close() - HW-layer teardown counterpart of
 * crystalhd_hw_open().  No-op success if the device was never started;
 * otherwise suspends the hardware (which stops the device and puts DDR
 * to sleep) and clears dev_started.
 */
1893 BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1896 BCMLOG_ERR("Invalid Arguments\n")
1897 return BC_STS_INV_ARG;
1900 if (!hw->dev_started)
1901 return BC_STS_SUCCESS;
1903 /* Stop and DDR sleep will happen in here */
1904 crystalhd_hw_suspend(hw);
1905 hw->dev_started = false;
1907 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_setup_dma_rings() - allocate the TX and RX DMA resources.
 * Creates the I/O queues, then for each of BC_TX_LIST_CNT TX slots
 * allocates a BC_LINK_MAX_SGLS descriptor area and parks the slot on
 * tx_freeq; for each of BC_RX_LIST_CNT RX slots allocates a packet
 * struct plus descriptor area and hands it to the free pool via
 * crystalhd_hw_free_rx_pkt().  Any failure unwinds everything through
 * crystalhd_hw_free_dma_rings().
 *
 * NOTE(review): elided in this extract - the loop counter/mem/mem_len
 * declarations, and the "if (mem)" / "else" lines around each memset vs.
 * insufficient-memory branch (the visible memset and error log belong to
 * opposite branches).  Confirm against the full source.
 */
1910 BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1915 dma_addr_t phy_addr;
1916 BC_STATUS sts = BC_STS_SUCCESS;
1917 crystalhd_rx_dma_pkt *rpkt;
1919 if (!hw || !hw->adp) {
1920 BCMLOG_ERR("Invalid Arguments\n");
1921 return BC_STS_INV_ARG;
1924 sts = crystalhd_hw_create_ioqs(hw);
1925 if (sts != BC_STS_SUCCESS) {
1926 BCMLOG_ERR("Failed to create IOQs..\n");
1930 mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1932 for (i = 0; i < BC_TX_LIST_CNT; i++) {
1933 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1935 memset(mem, 0, mem_len);
1937 BCMLOG_ERR("Insufficient Memory For TX\n");
1938 crystalhd_hw_free_dma_rings(hw);
1939 return BC_STS_INSUFF_RES;
1941 /* rx_pkt_pool -- static memory allocation */
1942 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1943 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1944 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1945 sizeof(dma_descriptor);
1946 hw->tx_pkt_pool[i].list_tag = 0;
1948 /* Add TX dma requests to Free Queue..*/
1949 sts = crystalhd_dioq_add(hw->tx_freeq,
1950 &hw->tx_pkt_pool[i], false, 0);
1951 if (sts != BC_STS_SUCCESS) {
1952 crystalhd_hw_free_dma_rings(hw);
1957 for (i = 0; i < BC_RX_LIST_CNT; i++) {
1958 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
1960 BCMLOG_ERR("Insufficient Memory For RX\n");
1961 crystalhd_hw_free_dma_rings(hw);
1962 return BC_STS_INSUFF_RES;
1965 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1967 memset(mem, 0, mem_len);
1969 BCMLOG_ERR("Insufficient Memory For RX\n");
1970 crystalhd_hw_free_dma_rings(hw);
1971 return BC_STS_INSUFF_RES;
1973 rpkt->desc_mem.pdma_desc_start = mem;
1974 rpkt->desc_mem.phy_addr = phy_addr;
1975 rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1976 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
/* "free" here means: push the fresh packet onto the RX free pool. */
1977 crystalhd_hw_free_rx_pkt(hw, rpkt);
1980 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_free_dma_rings() - release everything set up by
 * crystalhd_hw_setup_dma_rings(): delete the I/O queues, free each TX
 * slot's coherent descriptor area, then drain the RX packet pool (each
 * fetched packet's descriptor area is freed; the kfree of the packet
 * struct itself is on an elided line).
 * NOTE(review): the loop counter declaration, the drain-loop header
 * around crystalhd_hw_alloc_rx_pkt(), and the kfree(rpkt) are elided in
 * this extract.
 */
1983 BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
1986 crystalhd_rx_dma_pkt *rpkt = NULL;
1988 if (!hw || !hw->adp) {
1989 BCMLOG_ERR("Invalid Arguments\n");
1990 return BC_STS_INV_ARG;
1993 /* Delete all IOQs.. */
1994 crystalhd_hw_delete_ioqs(hw);
1996 for (i = 0; i < BC_TX_LIST_CNT; i++) {
1997 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
1998 bc_kern_dma_free(hw->adp,
1999 hw->tx_pkt_pool[i].desc_mem.sz,
2000 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2001 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2003 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2007 BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2009 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2012 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2013 rpkt->desc_mem.pdma_desc_start,
2014 rpkt->desc_mem.phy_addr);
2018 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_post_tx() - post one TX (bitstream-in) request.  Fast-path
 * rejects with a busy count when the code-in FIFO is full, then: fetch a
 * free TX slot, translate the request's scatter list into DMA
 * descriptors (returning the slot to the free queue on failure), fill in
 * the completion callback/event, and under hw->lock pick the LIST0/LIST1
 * first-descriptor registers, assign the caller-visible list tag, and
 * advance the post index.  The request is queued on tx_actq BEFORE the
 * valid bit is written, because the completion interrupt can fire
 * immediately after the low-address write (bit 0 = valid).
 *
 * NOTE(review): elided in this extract - the "rc" declaration, the
 * busy-path return after stats.cin_busy++, the "else" selecting the
 * LIST1 registers, and the failure return after the double-fault log.
 */
2021 BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
2022 hw_comp_callback call_back,
2023 wait_queue_head_t *cb_event, uint32_t *list_id,
2026 tx_dma_pkt *tx_dma_packet = NULL;
2027 uint32_t first_desc_u_addr, first_desc_l_addr;
2028 uint32_t low_addr, high_addr;
2030 BC_STATUS sts, add_sts;
2031 uint32_t dummy_index = 0;
2032 unsigned long flags;
2035 if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2036 BCMLOG_ERR("Invalid Arguments\n");
2037 return BC_STS_INV_ARG;
2041 * Since we hit code in busy condition very frequently,
2042 * we will check the code in status first before
2043 * checking the availability of free elem.
2045 * This will avoid the Q fetch/add in normal condition.
2047 rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2050 hw->stats.cin_busy++;
2054 /* Get a list from TxFreeQ */
2055 tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
2056 if (!tx_dma_packet) {
2057 BCMLOG_ERR("No empty elements..\n");
2058 return BC_STS_ERR_USAGE;
2061 sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2062 &tx_dma_packet->desc_mem,
2064 if (sts != BC_STS_SUCCESS) {
/* Translation failed: give the slot back; a second failure here would
 * leak the slot, hence the "double fault" log. */
2065 add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2067 if (add_sts != BC_STS_SUCCESS)
2068 BCMLOG_ERR("double fault..\n");
2075 desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2076 low_addr = desc_addr.low_part;
2077 high_addr = desc_addr.high_part;
2079 tx_dma_packet->call_back = call_back;
2080 tx_dma_packet->cb_event = cb_event;
2081 tx_dma_packet->dio_req = ioreq;
2083 spin_lock_irqsave(&hw->lock, flags);
2085 if (hw->tx_list_post_index == 0) {
2086 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2087 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
/* else branch (elided "} else {"): use the LIST1 register pair. */
2089 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2090 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2093 *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2094 hw->tx_list_post_index;
2096 hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2098 spin_unlock_irqrestore(&hw->lock, flags);
2101 /* Insert in Active Q..*/
2102 crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2103 tx_dma_packet->list_tag);
2106 * Interrupt will come as soon as you write
2107 * the valid bit. So be ready for that. All
2108 * the initialization should happen before that.
2110 crystalhd_start_tx_dma_engine(hw);
2111 crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2113 crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
2114 /* Be sure we set the valid bit ^^^^ */
2116 return BC_STS_SUCCESS;
2120 * This is a force cancel and we are racing with ISR.
2122 * Will try to remove the req from ActQ before ISR gets it.
2123 * If ISR gets it first then the completion happens in the
2124 * normal path and we will return _STS_NO_DATA from here.
2126 * FIX_ME: Not Tested the actual condition..
2128 BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
2130 if (!hw || !list_id) {
2131 BCMLOG_ERR("Invalid Arguments\n");
2132 return BC_STS_INV_ARG;
2135 crystalhd_stop_tx_dma_engine(hw);
2136 crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2138 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_add_cap_buffer() - attach a user capture buffer to an RX
 * packet.  Allocates a packet from the pool, translates the request's
 * scatter list into DMA descriptors, and records the physical address of
 * the UV descriptor chain (one descriptor past uv_desc_ix) when the
 * frame has a UV plane.  With @en_post the packet is posted to the
 * hardware immediately; otherwise it is parked on rx_freeq.
 *
 * NOTE(review): elided in this extract - the "sts" declaration, the
 * error-path handling after the failed translation at line 2163, the
 * condition guarding the UV address computation, and the final
 * "return sts;".  Confirm against the full source.
 */
2141 BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2142 crystalhd_dio_req *ioreq, bool en_post)
2144 crystalhd_rx_dma_pkt *rpkt;
2145 uint32_t tag, uv_desc_ix = 0;
2148 if (!hw || !ioreq) {
2149 BCMLOG_ERR("Invalid Arguments\n");
2150 return BC_STS_INV_ARG;
2153 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2155 BCMLOG_ERR("Insufficient resources\n");
2156 return BC_STS_INSUFF_RES;
2159 rpkt->dio_req = ioreq;
2160 tag = rpkt->pkt_tag;
2162 sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
2163 if (sts != BC_STS_SUCCESS)
2166 rpkt->uv_phy_addr = 0;
2168 /* Store the address of UV in the rx packet for post*/
2170 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2171 (sizeof(dma_descriptor) * (uv_desc_ix + 1));
2174 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
/* en_post == false path (guard elided): just park it on the free queue. */
2176 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
/*
 * crystalhd_hw_get_cap_buffer() - hand a completed capture back to the
 * caller.  Blocks on the ready queue for up to BC_PROC_OUTPUT_TIMEOUT;
 * a wake by signal maps to BC_STS_IO_USER_ABORT, expiry to
 * BC_STS_TIMEOUT.  On success, copies the completion flags into the user
 * info, copies the picture information block when the packet carries one
 * (COMP_FLAG_PIB_VALID), returns the DIO request through @ioreq, and
 * recycles the packet to the pool.
 *
 * NOTE(review): the "if (!rpkt)" guard and the "if (sig_pending)" test
 * distinguishing abort from timeout are elided between lines 2195 and
 * 2201 in this extract.
 */
2181 BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2182 BC_PIC_INFO_BLOCK *pib,
2183 crystalhd_dio_req **ioreq)
2185 crystalhd_rx_dma_pkt *rpkt;
2186 uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2187 uint32_t sig_pending = 0;
2190 if (!hw || !ioreq || !pib) {
2191 BCMLOG_ERR("Invalid Arguments\n");
2192 return BC_STS_INV_ARG;
2195 rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2198 BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n", sig_pending);
2199 return BC_STS_IO_USER_ABORT;
2201 return BC_STS_TIMEOUT;
2205 rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2207 if (rpkt->flags & COMP_FLAG_PIB_VALID)
2208 memcpy(pib, &rpkt->pib, sizeof(*pib));
2210 *ioreq = rpkt->dio_req;
2212 crystalhd_hw_free_rx_pkt(hw, rpkt);
2214 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_start_capture() - (re)arm capture by posting one free RX
 * packet to each of the DMA_ENGINE_CNT lists.  Returns BC_STS_NO_DATA
 * when the free queue runs dry.
 * NOTE(review): the local declarations (i, sts) and the loop's
 * early-exit after a failed post are elided in this extract.
 */
2217 BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2219 crystalhd_rx_dma_pkt *rx_pkt;
2224 BCMLOG_ERR("Invalid Arguments\n");
2225 return BC_STS_INV_ARG;
2228 /* This is start of capture.. Post to both the lists.. */
2229 for (i = 0; i < DMA_ENGINE_CNT; i++) {
2230 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2232 return BC_STS_NO_DATA;
2233 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2234 if (BC_STS_SUCCESS != sts)
2239 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_stop_capture() - stop the RX DMA engine, then drain the
 * free queue, releasing each fetched packet through the RX release
 * callback.
 * NOTE(review): the "temp" declaration and the drain loop's do/while
 * framing around the fetch are elided in this extract.
 */
2242 BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2247 BCMLOG_ERR("Invalid Arguments\n");
2248 return BC_STS_INV_ARG;
2251 crystalhd_stop_rx_dma_engine(hw);
2254 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2256 crystalhd_rx_pkt_rel_call_back(hw, temp);
2259 return BC_STS_SUCCESS;
2262 BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2264 hw->stats.pause_cnt++;
2265 hw->stop_pending = 1;
2267 if ((hw->rx_list_sts[0] == sts_free) &&
2268 (hw->rx_list_sts[1] == sts_free))
2269 crystalhd_hw_finalize_pause(hw);
2271 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_unpause() - resume capture after a pause: clear the
 * pending-stop flag, disable ASPM L1 on the PCIe link (re-enabled by
 * crystalhd_hw_finalize_pause() when pausing), and restart capture.
 * NOTE(review): the declarations of sts/aspm and the final "return sts;"
 * are elided in this extract.
 */
2274 BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2279 hw->stop_pending = 0;
2281 aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2282 aspm &= ~ASPM_L1_ENABLE;
2283 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2284 crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2286 sts = crystalhd_hw_start_capture(hw);
/*
 * crystalhd_hw_suspend() - put the hardware into its low-power state:
 * sleep the DDR (and power down PLLs) via crystalhd_put_ddr2sleep(),
 * then stop the device.  Either failure is reported as BC_STS_ERROR.
 * NOTE(review): the "sts" declaration and the null-check condition
 * before the first error log are elided in this extract.
 */
2290 BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2295 BCMLOG_ERR("Invalid Arguments\n");
2296 return BC_STS_INV_ARG;
2299 sts = crystalhd_put_ddr2sleep(hw);
2300 if (sts != BC_STS_SUCCESS) {
2301 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2302 return BC_STS_ERROR;
2305 if (!crystalhd_stop_device(hw->adp)) {
2306 BCMLOG_ERR("Failed to Stop Device!!\n");
2307 return BC_STS_ERROR;
2310 return BC_STS_SUCCESS;
/*
 * crystalhd_hw_stats() - report or reset HW statistics.  Called with
 * @stats == NULL it zeroes the internal counters; otherwise it refreshes
 * the live free/ready queue depths and copies the counters out.
 * NOTE(review): the NULL test and the early return after the memset are
 * elided in this extract.
 */
2313 void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
2316 BCMLOG_ERR("Invalid Arguments\n");
2320 /* if called w/NULL stats, its a req to zero out the stats */
2322 memset(&hw->stats, 0, sizeof(hw->stats));
2326 hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2327 hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);
2328 memcpy(stats, &hw->stats, sizeof(*stats));
/*
 * crystalhd_hw_set_core_clock() - reprogram the decoder core PLL for
 * hw->core_clock_mhz.  Computes the feedback divider n = MHz/5; no-op
 * (BC_STS_CLK_NOCHG) when n is unchanged or a power lock is held.  The
 * DRAM refresh rate is rescaled to the new frequency before the PLL is
 * written, then the PLL lock bit (0x00020000) is polled for up to
 * 10 x 10ms.
 *
 * NOTE(review): elided in this extract - the declarations of n/i/reg,
 * the vco_mg range selection between lines 2350 and 2363, the masking of
 * the old divider bits before "reg |= vco_mg << 12", the hw->prev_n
 * update, and the PLL-reset/divider writes around line 2377.  The
 * closing of the function also lies beyond this extract.  refresh_reg is
 * declared but its computation comment (rounding) applies at line 2374.
 */
2331 BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2334 uint32_t vco_mg, refresh_reg;
2337 BCMLOG_ERR("Invalid Arguments\n");
2338 return BC_STS_INV_ARG;
2341 /* FIXME: jarod: wha? */
2342 /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2343 n = hw->core_clock_mhz/5;
2345 if (n == hw->prev_n)
2346 return BC_STS_CLK_NOCHG;
2348 if (hw->pwr_lock > 0) {
2349 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2350 return BC_STS_CLK_NOCHG;
2363 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2367 reg |= vco_mg << 12;
2369 BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2370 hw->core_clock_mhz, n, vco_mg);
2372 /* Change the DRAM refresh rate to accomodate the new frequency */
2373 /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2374 refresh_reg = (7 * hw->core_clock_mhz / 16);
2375 bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2377 bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
/* Poll for PLL lock: bit 17 of DecHt_PllACtl, up to 10 x 10ms. */
2381 for (i = 0; i < 10; i++) {
2382 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2384 if (reg & 0x00020000) {
2386 /* FIXME: jarod: outputting a random "C" is... confusing... */
2387 BCMLOG(BCMLOG_INFO, "C");
2388 return BC_STS_SUCCESS;
2390 msleep_interruptible(10);
2393 BCMLOG(BCMLOG_INFO, "clk change failed\n");
2394 return BC_STS_CLK_NOCHG;