1 /***************************************************************************
2 * Copyright (c) 2005-2009, Broadcom Corporation.
4 * Name: crystalhd_hw . c
7 * BCM70010 Linux driver HW layer.
9 **********************************************************************
10 * This file is part of the crystalhd device driver.
12 * This driver is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation, version 2 of the License.
16 * This driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
23 **********************************************************************/
25 #include "crystalhd.h"
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
31 /* Functions internal to this file */
33 static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
35 bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
36 bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
40 static void crystalhd_start_dram(struct crystalhd_adp *adp)
42 bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
43 /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
44 ((15 / 5 - 1) << 7) | /* trp */
45 ((10 / 5 - 1) << 10) | /* trrd */
46 ((15 / 5 + 1) << 12) | /* twr */
47 ((2 + 1) << 16) | /* twtr */
48 ((70 / 5 - 2) << 19) | /* trfc */
51 bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
52 bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
53 bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
54 bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
55 bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
56 bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
57 bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
58 /* setting the refresh rate here */
59 bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
63 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
65 union link_misc_perst_deco_ctrl rst_deco_cntrl;
66 union link_misc_perst_clk_ctrl rst_clk_cntrl;
70 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
71 * delay to allow PLL to lock Clear alternate clock, stop clock bits
73 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
74 rst_clk_cntrl.pll_pwr_dn = 0;
75 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
76 msleep_interruptible(50);
78 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
79 rst_clk_cntrl.stop_core_clk = 0;
80 rst_clk_cntrl.sel_alt_clk = 0;
82 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
83 msleep_interruptible(50);
86 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
87 * Set internal bus arbiter timeout to 40us based on core clock speed
88 * (63MHz * 40us = 0x9D8)
90 crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
93 * Decoder clocks: MISC_PERST_DECODER_CTRL
94 * Enable clocks while 7412 reset is asserted, delay
95 * De-assert 7412 reset
97 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
98 MISC_PERST_DECODER_CTRL);
99 rst_deco_cntrl.stop_bcm_7412_clk = 0;
100 rst_deco_cntrl.bcm7412_rst = 1;
101 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
102 rst_deco_cntrl.whole_reg);
103 msleep_interruptible(10);
105 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
106 MISC_PERST_DECODER_CTRL);
107 rst_deco_cntrl.bcm7412_rst = 0;
108 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
109 rst_deco_cntrl.whole_reg);
110 msleep_interruptible(50);
112 /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
113 crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
115 /* Clear bit 29 of 0x404 */
116 temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
118 crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
120 /* 2.5V regulator must be set to 2.6 volts (+6%) */
121 /* FIXME: jarod: what's the point of this reg read? */
122 temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
123 crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
128 static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
130 union link_misc_perst_deco_ctrl rst_deco_cntrl;
131 union link_misc_perst_clk_ctrl rst_clk_cntrl;
135 * Decoder clocks: MISC_PERST_DECODER_CTRL
136 * Assert 7412 reset, delay
137 * Assert 7412 stop clock
139 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
140 MISC_PERST_DECODER_CTRL);
141 rst_deco_cntrl.stop_bcm_7412_clk = 1;
142 crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
143 rst_deco_cntrl.whole_reg);
144 msleep_interruptible(50);
146 /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
147 * Set internal bus arbiter timeout to 40us based on core clock speed
148 * (6.75MHZ * 40us = 0x10E)
150 crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
152 /* Link clocks: MISC_PERST_CLOCK_CTRL
153 * Stop core clk, delay
154 * Set alternate clk, delay, set PLL power down
156 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
157 rst_clk_cntrl.stop_core_clk = 1;
158 rst_clk_cntrl.sel_alt_clk = 1;
159 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
160 msleep_interruptible(50);
162 rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
163 rst_clk_cntrl.pll_pwr_dn = 1;
164 crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
167 * Read and restore the Transaction Configuration Register
170 temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
173 * Link core soft reset: MISC3_RESET_CTRL
174 * - Write BIT[0]=1 and read it back for core reset to take place
176 crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
177 rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
178 msleep_interruptible(50);
180 /* restore the transaction configuration register */
181 crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
186 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
188 union intr_mask_reg intr_mask;
189 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
190 intr_mask.mask_pcie_err = 1;
191 intr_mask.mask_pcie_rbusmast_err = 1;
192 intr_mask.mask_pcie_rgr_bridge = 1;
193 intr_mask.mask_rx_done = 1;
194 intr_mask.mask_rx_err = 1;
195 intr_mask.mask_tx_done = 1;
196 intr_mask.mask_tx_err = 1;
197 crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
202 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
204 union intr_mask_reg intr_mask;
205 intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
206 intr_mask.mask_pcie_err = 1;
207 intr_mask.mask_pcie_rbusmast_err = 1;
208 intr_mask.mask_pcie_rgr_bridge = 1;
209 intr_mask.mask_rx_done = 1;
210 intr_mask.mask_rx_err = 1;
211 intr_mask.mask_tx_done = 1;
212 intr_mask.mask_tx_err = 1;
213 crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
218 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
222 /* FIXME: jarod: wouldn't we want to write a 0 to the reg?
223 Or does the write clear the bits specified? */
224 reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
226 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
228 reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
230 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
232 reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
234 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
237 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
239 uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
242 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
244 /* Write End Of Interrupt for PCIE */
245 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
249 static void crystalhd_soft_rst(struct crystalhd_adp *adp)
253 /* Assert c011 soft reset*/
254 bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
255 msleep_interruptible(50);
257 /* Release c011 soft reset*/
258 bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
260 /* Disable Stuffing..*/
261 val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
263 crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
266 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
270 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
272 crystalhd_reg_wr(adp, AES_CMD, 0);
273 crystalhd_reg_wr(adp, AES_CONFIG_INFO,
274 (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
275 crystalhd_reg_wr(adp, AES_CMD, 0x1);
277 /* FIXME: jarod: I've seen this fail,
278 and introducing extra delays helps... */
279 for (i = 0; i < 100; ++i) {
280 reg = crystalhd_reg_rd(adp, AES_STATUS);
283 msleep_interruptible(10);
290 static bool crystalhd_start_device(struct crystalhd_adp *adp)
292 uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
294 BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
296 reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
297 reg_pwrmgmt &= ~ASPM_L1_ENABLE;
299 crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
301 if (!crystalhd_bring_out_of_rst(adp)) {
302 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
306 crystalhd_disable_interrupts(adp);
308 crystalhd_clear_errors(adp);
310 crystalhd_clear_interrupts(adp);
312 crystalhd_enable_interrupts(adp);
314 /* Enable the option for getting the total no. of DWORDS
315 * that have been transferred by the RXDMA engine
317 dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
319 crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
321 /* Enable PCI Global Control options */
322 glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
325 crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
327 crystalhd_enable_interrupts(adp);
329 crystalhd_soft_rst(adp);
330 crystalhd_start_dram(adp);
331 crystalhd_enable_uarts(adp);
336 static bool crystalhd_stop_device(struct crystalhd_adp *adp)
340 BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
341 /* Clear and disable interrupts */
342 crystalhd_disable_interrupts(adp);
343 crystalhd_clear_errors(adp);
344 crystalhd_clear_interrupts(adp);
346 if (!crystalhd_put_in_reset(adp))
347 BCMLOG_ERR("Failed to Put Link To Reset State\n");
349 reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
350 reg |= ASPM_L1_ENABLE;
351 crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
353 /* Set PCI Clk Req */
354 reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
355 reg |= PCI_CLK_REQ_ENABLE;
356 crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
361 static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(
362 struct crystalhd_hw *hw)
364 unsigned long flags = 0;
365 struct crystalhd_rx_dma_pkt *temp = NULL;
370 spin_lock_irqsave(&hw->lock, flags);
371 temp = hw->rx_pkt_pool_head;
373 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
374 temp->dio_req = NULL;
378 spin_unlock_irqrestore(&hw->lock, flags);
383 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
384 struct crystalhd_rx_dma_pkt *pkt)
386 unsigned long flags = 0;
391 spin_lock_irqsave(&hw->lock, flags);
392 pkt->next = hw->rx_pkt_pool_head;
393 hw->rx_pkt_pool_head = pkt;
394 spin_unlock_irqrestore(&hw->lock, flags);
398 * Call back from TX - IOQ deletion.
400 * This routine will release the TX DMA rings allocated
401 * during setup_dma rings interface.
403 * Memory is allocated per DMA ring basis. This is just
404 * a place holder to be able to create the dio queues.
406 static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
411 * Rx Packet release callback..
413 * Release All user mapped capture buffers and Our DMA packets
414 * back to our free pool. The actual cleanup of the DMA
415 * ring descriptors happen during dma ring release.
417 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
419 struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
420 struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;
423 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
428 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
430 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
432 crystalhd_hw_free_rx_pkt(hw, pkt);
435 #define crystalhd_hw_delete_ioq(adp, q) \
438 crystalhd_delete_dioq(adp, q); \
443 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
448 BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
449 crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
450 crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
451 crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
452 crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
453 crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
456 #define crystalhd_hw_create_ioq(sts, hw, q, cb) \
458 sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \
459 if (sts != BC_STS_SUCCESS) \
460 goto hw_create_ioq_err; \
467 * RX - Active, Ready and Free.
469 static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
471 enum BC_STATUS sts = BC_STS_SUCCESS;
474 BCMLOG_ERR("Invalid Arg!!\n");
475 return BC_STS_INV_ARG;
478 crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
479 crystalhd_tx_desc_rel_call_back);
480 crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
481 crystalhd_tx_desc_rel_call_back);
483 crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
484 crystalhd_rx_pkt_rel_call_back);
485 crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
486 crystalhd_rx_pkt_rel_call_back);
487 crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
488 crystalhd_rx_pkt_rel_call_back);
493 crystalhd_hw_delete_ioqs(hw);
499 static bool crystalhd_code_in_full(struct crystalhd_adp *adp,
500 uint32_t needed_sz, bool b_188_byte_pkts, uint8_t flags)
502 uint32_t base, end, writep, readp;
503 uint32_t cpbSize, cpbFullness, fifoSize;
505 if (flags & 0x02) { /* ASF Bit is set */
506 base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
507 end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
508 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
509 readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
510 } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
511 base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
512 end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
513 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
514 readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
516 base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
517 end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
518 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
519 readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
522 cpbSize = end - base;
524 cpbFullness = writep - readp;
526 cpbFullness = (end - base) - (readp - writep);
528 fifoSize = cpbSize - cpbFullness;
530 if (fifoSize < BC_INFIFO_THRESHOLD)
533 if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
539 static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
540 uint32_t list_id, enum BC_STATUS cs)
542 struct tx_dma_pkt *tx_req;
544 if (!hw || !list_id) {
545 BCMLOG_ERR("Invalid Arg..\n");
546 return BC_STS_INV_ARG;
551 tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(
552 hw->tx_actq, list_id);
554 if (cs != BC_STS_IO_USER_ABORT)
555 BCMLOG_ERR("Find and Fetch Did not find req\n");
556 return BC_STS_NO_DATA;
559 if (tx_req->call_back) {
560 tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
561 tx_req->dio_req = NULL;
562 tx_req->cb_event = NULL;
563 tx_req->call_back = NULL;
565 BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
569 /* Now put back the tx_list back in FreeQ */
570 tx_req->list_tag = 0;
572 return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
575 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw,
578 uint32_t err_mask, tmp;
579 unsigned long flags = 0;
581 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
582 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
583 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
585 if (!(err_sts & err_mask))
588 BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);
592 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
593 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
596 spin_lock_irqsave(&hw->lock, flags);
597 /* reset list index.*/
598 hw->tx_list_post_index = 0;
599 spin_unlock_irqrestore(&hw->lock, flags);
602 tmp = err_sts & err_mask;
603 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
608 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw,
611 uint32_t err_mask, tmp;
612 unsigned long flags = 0;
614 err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
615 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
616 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
618 if (!(err_sts & err_mask))
621 BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);
625 if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
626 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
629 spin_lock_irqsave(&hw->lock, flags);
630 /* reset list index.*/
631 hw->tx_list_post_index = 0;
632 spin_unlock_irqrestore(&hw->lock, flags);
635 tmp = err_sts & err_mask;
636 crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
641 static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
645 if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
646 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
649 if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
650 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
653 if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
654 INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
655 /* No error mask set.. */
659 /* Handle Tx errors. */
660 err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
662 if (crystalhd_tx_list0_handler(hw, err_sts))
663 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
666 if (crystalhd_tx_list1_handler(hw, err_sts))
667 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
670 hw->stats.tx_errors++;
673 static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
674 uint32_t ul_desc_index, uint32_t cnt)
678 if (!p_dma_desc || !cnt)
681 /* FIXME: jarod: perhaps a modparam desc_debug to enable this,
682 rather than setting ll (log level, I presume) to non-zero? */
686 for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
688 "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
689 ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
691 p_dma_desc[ul_desc_index].buff_addr_high,
692 p_dma_desc[ul_desc_index].buff_addr_low,
693 p_dma_desc[ul_desc_index].next_desc_addr_high,
694 p_dma_desc[ul_desc_index].next_desc_addr_low,
695 p_dma_desc[ul_desc_index].xfer_size,
696 p_dma_desc[ul_desc_index].intr_enable,
697 p_dma_desc[ul_desc_index].last_rec_indicator);
702 static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
703 struct dma_descriptor *desc,
704 dma_addr_t desc_paddr_base,
705 uint32_t sg_cnt, uint32_t sg_st_ix,
706 uint32_t sg_st_off, uint32_t xfr_sz)
708 uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
709 dma_addr_t desc_phy_addr = desc_paddr_base;
710 union addr_64 addr_temp;
712 if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
713 (!sg_cnt && !ioreq->uinfo.dir_tx)) {
714 BCMLOG_ERR("Invalid Args\n");
715 return BC_STS_INV_ARG;
718 for (ix = 0; ix < sg_cnt; ix++) {
720 /* Setup SGLE index. */
721 sg_ix = ix + sg_st_ix;
723 /* Get SGLE length */
724 len = crystalhd_get_sgle_len(ioreq, sg_ix);
726 BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix,
728 return BC_STS_NOT_IMPL;
730 /* Setup DMA desc with Phy addr & Length at current index. */
731 addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
732 if (sg_ix == sg_st_ix) {
733 addr_temp.full_addr += sg_st_off;
736 memset(&desc[ix], 0, sizeof(desc[ix]));
737 desc[ix].buff_addr_low = addr_temp.low_part;
738 desc[ix].buff_addr_high = addr_temp.high_part;
739 desc[ix].dma_dir = ioreq->uinfo.dir_tx;
741 /* Chain DMA descriptor. */
742 addr_temp.full_addr = desc_phy_addr +
743 sizeof(struct dma_descriptor);
744 desc[ix].next_desc_addr_low = addr_temp.low_part;
745 desc[ix].next_desc_addr_high = addr_temp.high_part;
747 if ((count + len) > xfr_sz)
748 len = xfr_sz - count;
751 if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
753 "inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
754 len, ix, count, xfr_sz, sg_cnt);
757 /* Length expects Multiple of 4 */
758 desc[ix].xfer_size = (len / 4);
760 crystalhd_hw_dump_desc(desc, ix, 1);
763 desc_phy_addr += sizeof(struct dma_descriptor);
766 last_desc_ix = ix - 1;
768 if (ioreq->fb_size) {
769 memset(&desc[ix], 0, sizeof(desc[ix]));
770 addr_temp.full_addr = ioreq->fb_pa;
771 desc[ix].buff_addr_low = addr_temp.low_part;
772 desc[ix].buff_addr_high = addr_temp.high_part;
773 desc[ix].dma_dir = ioreq->uinfo.dir_tx;
774 desc[ix].xfer_size = 1;
775 desc[ix].fill_bytes = 4 - ioreq->fb_size;
776 count += ioreq->fb_size;
780 /* setup last descriptor..*/
781 desc[last_desc_ix].last_rec_indicator = 1;
782 desc[last_desc_ix].next_desc_addr_low = 0;
783 desc[last_desc_ix].next_desc_addr_high = 0;
784 desc[last_desc_ix].intr_enable = 1;
786 crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
788 if (count != xfr_sz) {
789 BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
793 return BC_STS_SUCCESS;
796 static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(
797 struct crystalhd_dio_req *ioreq,
798 struct dma_desc_mem *pdesc_mem,
799 uint32_t *uv_desc_index)
801 struct dma_descriptor *desc = NULL;
802 dma_addr_t desc_paddr_base = 0;
803 uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
805 enum BC_STATUS sts = BC_STS_SUCCESS;
808 if (!ioreq || !pdesc_mem || !uv_desc_index) {
809 BCMLOG_ERR("Invalid Args\n");
810 return BC_STS_INV_ARG;
813 if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
814 !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
815 BCMLOG_ERR("Invalid Args\n");
816 return BC_STS_INV_ARG;
819 if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
820 BCMLOG_ERR("UV offset for TX??\n");
821 return BC_STS_INV_ARG;
825 desc = pdesc_mem->pdma_desc_start;
826 desc_paddr_base = pdesc_mem->phy_addr;
828 if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
829 sg_cnt = ioreq->sg_cnt;
830 xfr_sz = ioreq->uinfo.xfr_len;
832 sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
833 xfr_sz = ioreq->uinfo.uv_offset;
836 sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
837 sg_st_ix, sg_st_off, xfr_sz);
839 if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
842 /* Prepare for UV mapping.. */
843 desc = &pdesc_mem->pdma_desc_start[sg_cnt];
844 desc_paddr_base = pdesc_mem->phy_addr +
845 (sg_cnt * sizeof(struct dma_descriptor));
847 /* Done with desc addr.. now update sg stuff.*/
848 sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
849 xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
850 sg_st_ix = ioreq->uinfo.uv_sg_ix;
851 sg_st_off = ioreq->uinfo.uv_sg_off;
853 sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
854 sg_st_ix, sg_st_off, xfr_sz);
855 if (sts != BC_STS_SUCCESS)
858 *uv_desc_index = sg_st_ix;
863 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
867 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
868 if (!(dma_cntrl & DMA_START_BIT)) {
869 dma_cntrl |= DMA_START_BIT;
870 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
879 * Verify if the Stop generates a completion interrupt or not.
880 * if it does not generate an interrupt, then add polling here.
882 static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
884 uint32_t dma_cntrl, cnt = 30;
885 uint32_t l1 = 1, l2 = 1;
886 unsigned long flags = 0;
888 dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
890 BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
892 if (!(dma_cntrl & DMA_START_BIT)) {
893 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
894 return BC_STS_SUCCESS;
897 crystalhd_disable_interrupts(hw->adp);
899 /* Issue stop to HW */
900 /* This bit when set gave problems. Please check*/
901 dma_cntrl &= ~DMA_START_BIT;
902 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
904 BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
906 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
907 while ((l1 || l2) && cnt) {
910 l1 = crystalhd_reg_rd(hw->adp,
911 MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
916 l2 = crystalhd_reg_rd(hw->adp,
917 MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
921 msleep_interruptible(100);
927 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
928 crystalhd_enable_interrupts(hw->adp);
932 spin_lock_irqsave(&hw->lock, flags);
933 hw->tx_list_post_index = 0;
934 spin_unlock_irqrestore(&hw->lock, flags);
935 BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
936 crystalhd_enable_interrupts(hw->adp);
938 return BC_STS_SUCCESS;
941 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
944 * Position of the PIB Entries can be found at
945 * 0th and the 1st location of the Circular list.
948 uint32_t pib_cnt, r_offset, w_offset;
950 Q_addr = hw->pib_del_Q_addr;
952 /* Get the Read Pointer */
953 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
955 /* Get the Write Pointer */
956 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
958 if (r_offset == w_offset)
959 return 0; /* Queue is empty */
961 if (w_offset > r_offset)
962 pib_cnt = w_offset - r_offset;
964 pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
965 (r_offset + MIN_PIB_Q_DEPTH);
967 if (pib_cnt > MAX_PIB_Q_DEPTH) {
968 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
975 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
978 uint32_t addr_entry, r_offset, w_offset;
980 Q_addr = hw->pib_del_Q_addr;
982 /* Get the Read Pointer 0Th Location is Read Pointer */
983 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
985 /* Get the Write Pointer 1st Location is Write pointer */
986 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
989 if (r_offset == w_offset)
992 if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
995 /* Get the Actual Address of the PIB */
996 crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
999 /* Increment the Read Pointer */
1002 if (MAX_PIB_Q_DEPTH == r_offset)
1003 r_offset = MIN_PIB_Q_DEPTH;
1005 /* Write back the read pointer to It's Location */
1006 crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
1011 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw,
1012 uint32_t addr_to_rel)
1015 uint32_t r_offset, w_offset, n_offset;
1017 Q_addr = hw->pib_rel_Q_addr;
1019 /* Get the Read Pointer */
1020 crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
1022 /* Get the Write Pointer */
1023 crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1025 if ((r_offset < MIN_PIB_Q_DEPTH) ||
1026 (r_offset >= MAX_PIB_Q_DEPTH))
1029 n_offset = w_offset + 1;
1031 if (MAX_PIB_Q_DEPTH == n_offset)
1032 n_offset = MIN_PIB_Q_DEPTH;
1034 if (r_offset == n_offset)
1035 return false; /* should never happen */
1037 /* Write the DRAM ADDR to the Queue at Next Offset */
1038 crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1041 /* Put the New value of the write pointer in Queue */
1042 crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1047 static void cpy_pib_to_app(struct c011_pib *src_pib,
1048 struct BC_PIC_INFO_BLOCK *dst_pib)
1050 if (!src_pib || !dst_pib) {
1051 BCMLOG_ERR("Invalid Arguments\n");
1055 dst_pib->timeStamp = 0;
1056 dst_pib->picture_number = src_pib->ppb.picture_number;
1057 dst_pib->width = src_pib->ppb.width;
1058 dst_pib->height = src_pib->ppb.height;
1059 dst_pib->chroma_format = src_pib->ppb.chroma_format;
1060 dst_pib->pulldown = src_pib->ppb.pulldown;
1061 dst_pib->flags = src_pib->ppb.flags;
1062 dst_pib->sess_num = src_pib->ptsStcOffset;
1063 dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
1064 dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
1065 dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1066 dst_pib->frame_rate = src_pib->resolution;
1070 static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1073 struct c011_pib src_pib;
1074 uint32_t pib_addr, pib_cnt;
1075 struct BC_PIC_INFO_BLOCK *AppPib;
1076 struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1078 pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1083 for (cnt = 0; cnt < pib_cnt; cnt++) {
1085 pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1086 crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
1087 (uint32_t *)&src_pib);
1089 if (src_pib.bFormatChange) {
1090 rx_pkt = (struct crystalhd_rx_dma_pkt *)
1091 crystalhd_dioq_fetch(hw->rx_freeq);
1095 rx_pkt->flags |= COMP_FLAG_PIB_VALID |
1096 COMP_FLAG_FMT_CHANGE;
1097 AppPib = &rx_pkt->pib;
1098 cpy_pib_to_app(&src_pib, AppPib);
1101 "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1102 rx_pkt->pib.picture_number,
1103 rx_pkt->pib.aspect_ratio,
1104 rx_pkt->pib.chroma_format,
1105 rx_pkt->pib.colour_primaries,
1106 rx_pkt->pib.frame_rate,
1110 rx_pkt->pib.pulldown,
1113 crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true,
1118 crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1122 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1126 dma_cntrl = crystalhd_reg_rd(hw->adp,
1127 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1128 if (!(dma_cntrl & DMA_START_BIT)) {
1129 dma_cntrl |= DMA_START_BIT;
1130 crystalhd_reg_wr(hw->adp,
1131 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1134 dma_cntrl = crystalhd_reg_rd(hw->adp,
1135 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1136 if (!(dma_cntrl & DMA_START_BIT)) {
1137 dma_cntrl |= DMA_START_BIT;
1138 crystalhd_reg_wr(hw->adp,
1139 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1145 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1147 uint32_t dma_cntrl = 0, count = 30;
1148 uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1150 dma_cntrl = crystalhd_reg_rd(hw->adp,
1151 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1152 if ((dma_cntrl & DMA_START_BIT)) {
1153 dma_cntrl &= ~DMA_START_BIT;
1154 crystalhd_reg_wr(hw->adp,
1155 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1158 dma_cntrl = crystalhd_reg_rd(hw->adp,
1159 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1160 if ((dma_cntrl & DMA_START_BIT)) {
1161 dma_cntrl &= ~DMA_START_BIT;
1162 crystalhd_reg_wr(hw->adp,
1163 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1166 /* Poll for 3seconds (30 * 100ms) on both the lists..*/
1167 while ((l0y || l0uv || l1y || l1uv) && count) {
1170 l0y = crystalhd_reg_rd(hw->adp,
1171 MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1172 l0y &= DMA_START_BIT;
1174 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1178 l1y = crystalhd_reg_rd(hw->adp,
1179 MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1180 l1y &= DMA_START_BIT;
1182 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1186 l0uv = crystalhd_reg_rd(hw->adp,
1187 MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1188 l0uv &= DMA_START_BIT;
1190 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1194 l1uv = crystalhd_reg_rd(hw->adp,
1195 MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1196 l1uv &= DMA_START_BIT;
1198 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1200 msleep_interruptible(100);
1204 hw->rx_list_post_index = 0;
1206 BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1207 count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
/*
 * Program one RX capture list (Y plane, plus UV when uv_phy_addr is set)
 * with the descriptor chain of @rx_pkt.  Under rx_lock it claims the list
 * selected by hw->rx_list_post_index, tags the packet, and flags the list
 * as waiting for the corresponding DMA-done interrupt(s); it then queues
 * the packet on rx_actq, kicks the RX DMA engine and writes the
 * first-descriptor address registers (low word OR 0x01 = valid bit).
 * Returns BC_STS_INV_ARG on bad input, BC_STS_SUCCESS on post.
 */
1210 static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw,
1211 struct crystalhd_rx_dma_pkt *rx_pkt)
1213 uint32_t y_low_addr_reg, y_high_addr_reg;
1214 uint32_t uv_low_addr_reg, uv_high_addr_reg;
1215 union addr_64 desc_addr;
1216 unsigned long flags;
1218 if (!hw || !rx_pkt) {
1219 BCMLOG_ERR("Invalid Arguments\n");
1220 return BC_STS_INV_ARG;
1223 if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1224 BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1225 return BC_STS_INV_ARG;
1228 spin_lock_irqsave(&hw->rx_lock, flags);
1229 /* FIXME: jarod: sts_free is an enum for 0,
1230 in crystalhd_hw.h... yuk... */
/* List not free: bail out early; the caller (crystalhd_hw_post_cap_buff)
 * treats a BC_STS_BUSY result specially. */
1231 if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
1232 spin_unlock_irqrestore(&hw->rx_lock, flags);
/* Pick the register set for list 0 vs list 1. */
1236 if (!hw->rx_list_post_index) {
1237 y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1238 y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1239 uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1240 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
1242 y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1243 y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1244 uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1245 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
/* Tag the packet so the ISR can match it back to this list index. */
1247 rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1248 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1249 if (rx_pkt->uv_phy_addr)
1250 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1251 hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1252 spin_unlock_irqrestore(&hw->rx_lock, flags);
1254 crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false,
1257 crystalhd_start_rx_dma_engine(hw);
1258 /* Program the Y descriptor */
1259 desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1260 crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
1261 crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1263 if (rx_pkt->uv_phy_addr) {
1264 /* Program the UV descriptor */
1265 desc_addr.full_addr = rx_pkt->uv_phy_addr;
1266 crystalhd_reg_wr(hw->adp, uv_high_addr_reg,
1267 desc_addr.high_part);
1268 crystalhd_reg_wr(hw->adp, uv_low_addr_reg,
1269 desc_addr.low_part | 0x01);
1272 return BC_STS_SUCCESS;
/*
 * Try to program @rx_pkt into an RX DMA list.  If both lists are busy
 * (BC_STS_BUSY from crystalhd_hw_prog_rxdma), park the packet back on
 * rx_freeq so a later completion can re-post it.
 */
1275 static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1276 struct crystalhd_rx_dma_pkt *rx_pkt)
1278 enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1280 if (sts == BC_STS_BUSY)
1281 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1282 false, rx_pkt->pkt_tag);
/*
 * Read the current Y and UV transferred-byte counts for the given RX
 * list (list 0 vs list 1 selects which CUR_BYTE_CNT registers to read).
 * Results are returned through *y_dw_dnsz / *uv_dw_dnsz.
 */
1287 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1288 uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1290 uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1293 y_dn_sz_reg = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1294 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1296 y_dn_sz_reg = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1297 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1300 *y_dw_dnsz = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1301 *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1305 * This function should be called only after making sure that the two DMA
1306 * lists are free. This function does not check if DMA's are active, before
1307 * turning off the DMA.
/* Completes a pending pause: clears the START bit on both Y and UV RX
 * DMA control registers, resets the list posting index, and re-enables
 * PCIe ASPM L1 power saving now that no RX DMA is expected. */
1309 static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1311 uint32_t dma_cntrl, aspm;
1313 hw->stop_pending = 0;
1315 dma_cntrl = crystalhd_reg_rd(hw->adp,
1316 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1317 if (dma_cntrl & DMA_START_BIT) {
1318 dma_cntrl &= ~DMA_START_BIT;
1319 crystalhd_reg_wr(hw->adp,
1320 MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1323 dma_cntrl = crystalhd_reg_rd(hw->adp,
1324 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1325 if (dma_cntrl & DMA_START_BIT) {
1326 dma_cntrl &= ~DMA_START_BIT;
1327 crystalhd_reg_wr(hw->adp,
1328 MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1330 hw->rx_list_post_index = 0;
/* Allow the link to drop into ASPM L1 while capture is paused. */
1332 aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1333 aspm |= ASPM_L1_ENABLE;
1334 /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1335 crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
/*
 * Completion handler for one RX list.  Fetches the matching packet from
 * the active queue by tag; on success records the Y/UV done sizes and
 * hands the packet to rx_rdyq for the consumer, otherwise (error paths
 * below, partly elided here) the DIO is re-posted via
 * crystalhd_hw_post_cap_buff().
 */
1338 static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw,
1339 uint32_t list_index, enum BC_STATUS comp_sts)
1341 struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1342 uint32_t y_dw_dnsz, uv_dw_dnsz;
1343 enum BC_STATUS sts = BC_STS_SUCCESS;
1345 if (!hw || list_index >= DMA_ENGINE_CNT) {
1346 BCMLOG_ERR("Invalid Arguments\n");
1347 return BC_STS_INV_ARG;
1350 rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1351 hw->rx_pkt_tag_seed + list_index);
/* No packet matched the tag: log queue state and give up. */
1354 "Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1355 hw->rx_list_post_index, hw->rx_list_sts[0],
1356 hw->rx_list_sts[1], list_index,
1357 hw->rx_pkt_tag_seed + list_index, comp_sts);
1358 return BC_STS_INV_ARG;
1361 if (comp_sts == BC_STS_SUCCESS) {
1362 crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1363 rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1364 rx_pkt->flags = COMP_FLAG_DATA_VALID;
1365 if (rx_pkt->uv_phy_addr)
1366 rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
/* Deliver completed frame data to the ready queue (wake waiter). */
1367 crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1368 hw->rx_pkt_tag_seed + list_index);
1372 /* Check if we can post this DIO again. */
1373 return crystalhd_hw_post_cap_buff(hw, rx_pkt);
/*
 * Update the software status of RX list 0 from the interrupt and error
 * status registers.  DMA-done interrupts clear the "waiting" flags;
 * underruns are treated as benign (flag cleared, error bit consumed);
 * FIFO-full or any remaining error marks the list errored and resets
 * the posting index.  Handled error bits are written back to the
 * MISC1 error-status registers to acknowledge them.
 * Returns true if the list status changed.
 */
1376 static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
1377 uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
1380 enum list_sts tmp_lsts;
/* Fast path: no list-0 errors pending for either plane. */
1382 if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1385 tmp_lsts = hw->rx_list_sts[0];
/* ---- Y plane ---- */
1388 tmp = y_err_sts & GET_Y0_ERR_MSK;
1389 if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1390 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1392 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1393 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1394 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1397 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1398 hw->rx_list_sts[0] &= ~rx_y_mask;
1399 hw->rx_list_sts[0] |= rx_y_error;
1400 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
/* Any unhandled Y error bits left in tmp: mark list errored. */
1404 hw->rx_list_sts[0] &= ~rx_y_mask;
1405 hw->rx_list_sts[0] |= rx_y_error;
1406 hw->rx_list_post_index = 0;
/* ---- UV plane ---- */
1410 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1411 if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1412 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1414 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1415 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1416 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1420 MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1421 hw->rx_list_sts[0] &= ~rx_uv_mask;
1422 hw->rx_list_sts[0] |= rx_uv_error;
1423 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
/* Any unhandled UV error bits left in tmp: mark list errored. */
1427 hw->rx_list_sts[0] &= ~rx_uv_mask;
1428 hw->rx_list_sts[0] |= rx_uv_error;
1429 hw->rx_list_post_index = 0;
/* Acknowledge the list-0 error bits we observed. */
1432 if (y_err_sts & GET_Y0_ERR_MSK) {
1433 tmp = y_err_sts & GET_Y0_ERR_MSK;
1434 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1437 if (uv_err_sts & GET_UV0_ERR_MSK) {
1438 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1439 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1442 return tmp_lsts != hw->rx_list_sts[0];
/*
 * List-1 twin of crystalhd_rx_list0_handler: same state machine using
 * the L1 interrupt/error masks and hw->rx_list_sts[1].
 * Returns true if the list status changed.
 */
1445 static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
1446 uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
1449 enum list_sts tmp_lsts;
/* Fast path: no list-1 errors pending for either plane. */
1451 if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1454 tmp_lsts = hw->rx_list_sts[1];
/* ---- Y plane ---- */
1457 tmp = y_err_sts & GET_Y1_ERR_MSK;
1458 if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1459 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1461 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1462 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1463 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1466 if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1467 /* Add retry-support..*/
1468 hw->rx_list_sts[1] &= ~rx_y_mask;
1469 hw->rx_list_sts[1] |= rx_y_error;
1470 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
/* Any unhandled Y error bits left in tmp: mark list errored. */
1474 hw->rx_list_sts[1] &= ~rx_y_mask;
1475 hw->rx_list_sts[1] |= rx_y_error;
1476 hw->rx_list_post_index = 0;
/* ---- UV plane ---- */
1480 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1481 if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
1482 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1484 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1485 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1486 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1489 if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1490 /* Add retry-support*/
1491 hw->rx_list_sts[1] &= ~rx_uv_mask;
1492 hw->rx_list_sts[1] |= rx_uv_error;
1493 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
/* Any unhandled UV error bits left in tmp: mark list errored. */
1497 hw->rx_list_sts[1] &= ~rx_uv_mask;
1498 hw->rx_list_sts[1] |= rx_uv_error;
1499 hw->rx_list_post_index = 0;
/* Acknowledge the list-1 error bits we observed. */
1502 if (y_err_sts & GET_Y1_ERR_MSK) {
1503 tmp = y_err_sts & GET_Y1_ERR_MSK;
1504 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1507 if (uv_err_sts & GET_UV1_ERR_MSK) {
1508 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1509 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1512 return tmp_lsts != hw->rx_list_sts[1];
/*
 * RX interrupt service: for each DMA list, run the per-list handler
 * (under rx_lock), translate the resulting list state into a completion
 * status, then complete the packet via crystalhd_rx_pkt_done() outside
 * the lock.  Afterwards, either finalize a pending pause (both lists
 * free) or keep capture running by re-posting buffers.
 */
1516 static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1518 unsigned long flags;
1519 uint32_t i, list_avail = 0;
1520 enum BC_STATUS comp_sts = BC_STS_NO_DATA;
1521 uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1525 BCMLOG_ERR("Invalid Arguments\n");
/* Not an RX interrupt: nothing to do. */
1529 if (!(intr_sts & GET_RX_INTR_MASK))
1532 y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1533 uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1535 for (i = 0; i < DMA_ENGINE_CNT; i++) {
1536 /* Update States..*/
1537 spin_lock_irqsave(&hw->rx_lock, flags);
1539 ret = crystalhd_rx_list0_handler(hw, intr_sts,
1540 y_err_sts, uv_err_sts);
1542 ret = crystalhd_rx_list1_handler(hw, intr_sts,
1543 y_err_sts, uv_err_sts);
/* Map new list state to a completion status. */
1545 switch (hw->rx_list_sts[i]) {
1547 comp_sts = BC_STS_SUCCESS;
1553 /* We got error on both or Y or uv. */
1554 hw->stats.rx_errors++;
1555 crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1556 /* FIXME: jarod: this is where
1557 my mini pci-e card is tripping up */
1558 BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1559 i, hw->stats.rx_errors, y_err_sts,
1560 uv_err_sts, intr_sts, y_dn_sz,
1562 hw->rx_list_sts[i] = sts_free;
1563 comp_sts = BC_STS_ERROR;
1566 /* Wait for completion..*/
1567 comp_sts = BC_STS_NO_DATA;
1571 spin_unlock_irqrestore(&hw->rx_lock, flags);
1573 /* handle completion...*/
1574 if (comp_sts != BC_STS_NO_DATA) {
1575 crystalhd_rx_pkt_done(hw, i, comp_sts);
1576 comp_sts = BC_STS_NO_DATA;
/* Pause requested: finish it only once both lists are idle. */
1581 if (hw->stop_pending) {
1582 if ((hw->rx_list_sts[0] == sts_free) &&
1583 (hw->rx_list_sts[1] == sts_free))
1584 crystalhd_hw_finalize_pause(hw);
1586 crystalhd_hw_start_capture(hw);
/*
 * Post-process a completed firmware command.  For CHAN_START_VIDEO the
 * response carries the picture-info delivery/release queue addresses,
 * which are cached on @hw; for INIT the firmware configuration is
 * (re)loaded, failing with BC_STS_FW_AUTH_FAILED on error.
 */
1591 static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1592 struct BC_FW_CMD *fw_cmd)
1594 enum BC_STATUS sts = BC_STS_SUCCESS;
1595 struct dec_rsp_channel_start_video *st_rsp = NULL;
1597 switch (fw_cmd->cmd[0]) {
1598 case eCMD_C011_DEC_CHAN_START_VIDEO:
1599 st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
1600 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1601 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1602 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1603 hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1605 case eCMD_C011_INIT:
1606 if (!(crystalhd_load_firmware_config(hw->adp))) {
1607 BCMLOG_ERR("Invalid Params.\n");
1608 sts = BC_STS_FW_AUTH_FAILED;
/*
 * Power-down sequence for suspend: pulse the 7412 decoder reset, idle
 * the DDR (precharge all banks, then drop the CKE pin via SDRAM_PARAM),
 * reset the audio block and power down the PLL chain (C, audio, E, D,
 * A, B) one by one.  Order matters — registers are MMIO on the decoder.
 * Always returns BC_STS_SUCCESS.
 */
1617 static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1620 union link_misc_perst_decoder_ctrl rst_cntrl_reg;
1622 /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1623 rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp,
1624 MISC_PERST_DECODER_CTRL);
1626 rst_cntrl_reg.bcm_7412_rst = 1;
1627 crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
1628 rst_cntrl_reg.whole_reg);
1629 msleep_interruptible(50);
1631 rst_cntrl_reg.bcm_7412_rst = 0;
1632 crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
1633 rst_cntrl_reg.whole_reg);
1635 /* Close all banks, put DDR in idle */
1636 bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1638 /* Set bit 25 (drop CKE pin of DDR) */
1639 reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1641 bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1643 /* Reset the audio block */
1644 bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1646 /* Power down Raptor PLL */
1647 reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1649 bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1651 /* Power down all Audio PLL */
1652 bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1654 /* Power down video clock (75MHz) */
1655 reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1657 bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1659 /* Power down video clock (75MHz) */
1660 reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1662 bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1664 /* Power down core clock (200MHz) */
1665 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1667 bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1669 /* Power down core clock (200MHz) */
1670 reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1672 bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1674 return BC_STS_SUCCESS;
1677 /************************************************
1679 *************************************************/
/*
 * Download the firmware image @buffer (@sz bytes, including a 36-byte
 * trailing signature) into device DRAM via the DCI interface, then
 * write the byte-swapped signature words and wait for the device to
 * verify it.  Fails if OTP is not programmed, the DCI never reports
 * download-ready, or the signature check fails.
 */
1681 enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer,
1684 uint32_t reg_data, cnt, *temp_buff;
1685 uint32_t fw_sig_len = 36;
1686 uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1689 if (!adp || !buffer || !sz) {
1690 BCMLOG_ERR("Invalid Params.\n");
1691 return BC_STS_INV_ARG;
/* OTP bit 1 must be set or the part cannot authenticate firmware. */
1694 reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1695 if (!(reg_data & 0x02)) {
1696 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1697 return BC_STS_ERROR;
/* Kick off the DCI download state machine. */
1701 crystalhd_reg_wr(adp, DCI_CMD, 0);
1702 reg_data |= BC_BIT(0);
1703 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1707 msleep_interruptible(10);
/* Poll DCI_STATUS bit 4 (download ready), bounded by a timeout. */
1709 while (reg_data != BC_BIT(4)) {
1710 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1711 reg_data &= BC_BIT(4);
1713 BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1714 return BC_STS_TIMEOUT;
1718 msleep_interruptible(10);
1719 /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
1720 crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1721 temp_buff = (uint32_t *)buffer;
/* Stream the image (minus the signature) one dword at a time. */
1722 for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1723 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1724 crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1728 msleep_interruptible(10);
/* Write the 8 signature dwords, byte-swapped, into the DCI
 * signature registers (descending from DCI_SIGNATURE_DATA_7). */
1732 sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1733 for (cnt = 0; cnt < 8; cnt++) {
1734 uint32_t swapped_data = *temp_buff;
1735 swapped_data = bswap_32_1(swapped_data);
1736 crystalhd_reg_wr(adp, sig_reg, swapped_data);
1740 msleep_interruptible(10);
/* Request signature verification (DCI_CMD bit 1). */
1743 reg_data |= BC_BIT(1);
1744 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1745 msleep_interruptible(10);
1748 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
/* Bit 9 set = signature accepted; wait for bit 0 (boot done). */
1750 if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
1752 while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1753 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1754 reg_data &= BC_BIT(0);
1757 msleep_interruptible(10);
/* Release the processor (DCI_CMD bit 4). */
1760 reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1761 reg_data |= BC_BIT(4);
1762 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1765 BCMLOG_ERR("F/w Signature mismatch\n");
1766 return BC_STS_FW_AUTH_FAILED;
1769 BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1770 return BC_STS_SUCCESS;
/*
 * Synchronously issue a firmware command: write the command block into
 * device memory at TS_Host2CpuSnd, ring the host->CPU mailbox, then
 * sleep (up to 20s) on fw_cmd_event until the ISR signals completion.
 * On success the response is read back from the address in the
 * CPU->host mailbox, checked for C011_RET_SUCCESS, and post-processed.
 */
1773 enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
1774 struct BC_FW_CMD *fw_cmd)
1776 uint32_t cnt = 0, cmd_res_addr;
1777 uint32_t *cmd_buff, *res_buff;
1778 wait_queue_head_t fw_cmd_event;
1782 crystalhd_create_event(&fw_cmd_event);
1784 if (!hw || !fw_cmd) {
1785 BCMLOG_ERR("Invalid Arguments\n");
1786 return BC_STS_INV_ARG;
1789 cmd_buff = fw_cmd->cmd;
1790 res_buff = fw_cmd->rsp;
1792 if (!cmd_buff || !res_buff) {
1793 BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1794 return BC_STS_INV_ARG;
/* Arm the completion event the ISR will signal (see
 * crystalhd_hw_interrupt, deco_intr & 0x80000000). */
1799 hw->fwcmd_evt_sts = 0;
1800 hw->pfw_cmd_event = &fw_cmd_event;
1802 /*Write the command to the memory*/
1803 crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1805 /*Memory Read for memory arbitrator flush*/
1806 crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1808 /* Write the command address to mailbox */
1809 bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1810 msleep_interruptible(50);
1812 crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
/* Map the wait result to a driver status. */
1815 sts = BC_STS_SUCCESS;
1816 } else if (rc == -EBUSY) {
1817 BCMLOG_ERR("Firmware command T/O\n");
1818 sts = BC_STS_TIMEOUT;
1819 } else if (rc == -EINTR) {
1820 BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1821 sts = BC_STS_IO_USER_ABORT;
1823 BCMLOG_ERR("FwCmd IO Error.\n");
1824 sts = BC_STS_IO_ERROR;
1827 if (sts != BC_STS_SUCCESS) {
1828 BCMLOG_ERR("FwCmd Failed.\n");
1833 /*Get the Response Address*/
1834 cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1836 /*Read the Response*/
1837 crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
/* Word 2 of the response carries the firmware return code. */
1841 if (res_buff[2] != C011_RET_SUCCESS) {
1842 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1843 return BC_STS_FW_CMD_ERR;
1846 sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1847 if (sts != BC_STS_SUCCESS)
1848 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
/*
 * Top-level interrupt handler.  Reads decoder (Stream2Host) and PCIe
 * (INTR_INTR_STATUS) interrupt status; completes pending firmware
 * commands and picture-info processing for decoder interrupts, then
 * dispatches RX/TX ISRs and acknowledges everything (clear register +
 * EOI).  0xdeaddead reads indicate a dead/absent device and are ignored.
 */
1853 bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1855 uint32_t intr_sts = 0;
1856 uint32_t deco_intr = 0;
1859 if (!adp || !hw->dev_started)
1862 hw->stats.num_interrupts++;
1865 deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1866 intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1869 /* let system know we processed interrupt..*/
1871 hw->stats.dev_interrupts++;
1874 if (deco_intr && (deco_intr != 0xdeaddead)) {
/* Bit 31: firmware command completion. */
1876 if (deco_intr & 0x80000000) {
1877 /*Set the Event and the status flag*/
1878 if (hw->pfw_cmd_event) {
1879 hw->fwcmd_evt_sts = 1;
1880 crystalhd_set_event(hw->pfw_cmd_event);
/* Bit 1: picture information block available. */
1884 if (deco_intr & BC_BIT(1))
1885 crystalhd_hw_proc_pib(hw);
1887 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1888 /* FIXME: jarod: No udelay? might this be
1889 the real reason mini pci-e cards were stalling out? */
1890 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1895 crystalhd_rx_isr(hw, intr_sts);
1898 crystalhd_tx_isr(hw, intr_sts);
1900 /* Clear interrupts */
1903 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1905 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
/*
 * One-time HW bring-up: zero the hw context, init the locks and tag
 * seeds, start the device and program the initial core clock.
 * Idempotent — returns early with success if already started.
 */
1913 enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw,
1914 struct crystalhd_adp *adp)
1917 BCMLOG_ERR("Invalid Arguments\n");
1918 return BC_STS_INV_ARG;
1921 if (hw->dev_started)
1922 return BC_STS_SUCCESS;
1924 memset(hw, 0, sizeof(struct crystalhd_hw));
1927 spin_lock_init(&hw->lock);
1928 spin_lock_init(&hw->rx_lock);
1929 /* FIXME: jarod: what are these magic numbers?!? */
1930 hw->tx_ioq_tag_seed = 0x70023070;
1931 hw->rx_pkt_tag_seed = 0x70029070;
1933 hw->stop_pending = 0;
1934 crystalhd_start_device(hw->adp);
1935 hw->dev_started = true;
1937 /* set initial core clock */
1938 hw->core_clock_mhz = CLOCK_PRESET;
1941 crystalhd_hw_set_core_clock(hw);
1943 return BC_STS_SUCCESS;
/*
 * Shut the hardware down via crystalhd_hw_suspend() (DDR sleep + device
 * stop) and mark the device stopped.  Idempotent when not started.
 */
1946 enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1949 BCMLOG_ERR("Invalid Arguments\n");
1950 return BC_STS_INV_ARG;
1953 if (!hw->dev_started)
1954 return BC_STS_SUCCESS;
1956 /* Stop and DDR sleep will happen in here */
1957 crystalhd_hw_suspend(hw);
1958 hw->dev_started = false;
1960 return BC_STS_SUCCESS;
/*
 * Allocate all DMA resources: create the I/O queues, then allocate one
 * descriptor-list buffer (BC_LINK_MAX_SGLS descriptors) per TX packet
 * in the static tx_pkt_pool and per dynamically-allocated RX packet,
 * queueing each onto the corresponding free queue.  On any allocation
 * failure everything already allocated is torn down via
 * crystalhd_hw_free_dma_rings().
 */
1963 enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1968 dma_addr_t phy_addr;
1969 enum BC_STATUS sts = BC_STS_SUCCESS;
1970 struct crystalhd_rx_dma_pkt *rpkt;
1972 if (!hw || !hw->adp) {
1973 BCMLOG_ERR("Invalid Arguments\n");
1974 return BC_STS_INV_ARG;
1977 sts = crystalhd_hw_create_ioqs(hw);
1978 if (sts != BC_STS_SUCCESS) {
1979 BCMLOG_ERR("Failed to create IOQs..\n");
1983 mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
/* ---- TX descriptor lists (static pool) ---- */
1985 for (i = 0; i < BC_TX_LIST_CNT; i++) {
1986 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1988 memset(mem, 0, mem_len);
1990 BCMLOG_ERR("Insufficient Memory For TX\n");
1991 crystalhd_hw_free_dma_rings(hw);
1992 return BC_STS_INSUFF_RES;
1994 /* rx_pkt_pool -- static memory allocation */
1995 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1996 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1997 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1998 sizeof(struct dma_descriptor);
1999 hw->tx_pkt_pool[i].list_tag = 0;
2001 /* Add TX dma requests to Free Queue..*/
2002 sts = crystalhd_dioq_add(hw->tx_freeq,
2003 &hw->tx_pkt_pool[i], false, 0);
2004 if (sts != BC_STS_SUCCESS) {
2005 crystalhd_hw_free_dma_rings(hw);
/* ---- RX packets (heap-allocated) + their descriptor lists ---- */
2010 for (i = 0; i < BC_RX_LIST_CNT; i++) {
2011 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
2013 BCMLOG_ERR("Insufficient Memory For RX\n");
2014 crystalhd_hw_free_dma_rings(hw);
2015 return BC_STS_INSUFF_RES;
2018 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
2020 memset(mem, 0, mem_len);
2022 BCMLOG_ERR("Insufficient Memory For RX\n");
2023 crystalhd_hw_free_dma_rings(hw);
2025 return BC_STS_INSUFF_RES;
2027 rpkt->desc_mem.pdma_desc_start = mem;
2028 rpkt->desc_mem.phy_addr = phy_addr;
2029 rpkt->desc_mem.sz = BC_LINK_MAX_SGLS *
2030 sizeof(struct dma_descriptor);
2031 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
2032 crystalhd_hw_free_rx_pkt(hw, rpkt);
2035 return BC_STS_SUCCESS;
/*
 * Undo crystalhd_hw_setup_dma_rings(): delete the I/O queues, free each
 * TX descriptor-list DMA buffer, then drain the RX packet pool freeing
 * each packet's descriptor memory (the rpkt structs themselves are
 * released in the elided tail of the loop).
 */
2038 enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
2041 struct crystalhd_rx_dma_pkt *rpkt = NULL;
2043 if (!hw || !hw->adp) {
2044 BCMLOG_ERR("Invalid Arguments\n");
2045 return BC_STS_INV_ARG;
2048 /* Delete all IOQs.. */
2049 crystalhd_hw_delete_ioqs(hw);
2051 for (i = 0; i < BC_TX_LIST_CNT; i++) {
2052 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
2053 bc_kern_dma_free(hw->adp,
2054 hw->tx_pkt_pool[i].desc_mem.sz,
2055 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2056 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2058 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2062 BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2064 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2067 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2068 rpkt->desc_mem.pdma_desc_start,
2069 rpkt->desc_mem.phy_addr);
2073 return BC_STS_SUCCESS;
/*
 * Queue a TX (bitstream send) request.  Checks the coded-input FIFO for
 * space, grabs a free TX packet, translates the user SGL into hardware
 * DMA descriptors, assigns a list tag (returned via *list_id), moves
 * the packet to the active queue, and finally programs the TX
 * first-descriptor registers — the low-address write (OR'd valid bit)
 * is what arms the transfer, so all bookkeeping happens before it.
 */
2076 enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
2077 struct crystalhd_dio_req *ioreq,
2078 hw_comp_callback call_back,
2079 wait_queue_head_t *cb_event, uint32_t *list_id,
2082 struct tx_dma_pkt *tx_dma_packet = NULL;
2083 uint32_t first_desc_u_addr, first_desc_l_addr;
2084 uint32_t low_addr, high_addr;
2085 union addr_64 desc_addr;
2086 enum BC_STATUS sts, add_sts;
2087 uint32_t dummy_index = 0;
2088 unsigned long flags;
2091 if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2092 BCMLOG_ERR("Invalid Arguments\n");
2093 return BC_STS_INV_ARG;
2097 * Since we hit code in busy condition very frequently,
2098 * we will check the code in status first before
2099 * checking the availability of free elem.
2101 * This will avoid the Q fetch/add in normal condition.
2103 rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2106 hw->stats.cin_busy++;
2110 /* Get a list from TxFreeQ */
2111 tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(
2113 if (!tx_dma_packet) {
2114 BCMLOG_ERR("No empty elements..\n");
2115 return BC_STS_ERR_USAGE;
2118 sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2119 &tx_dma_packet->desc_mem,
2121 if (sts != BC_STS_SUCCESS) {
/* Translation failed: return the packet to the free queue. */
2122 add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2124 if (add_sts != BC_STS_SUCCESS)
2125 BCMLOG_ERR("double fault..\n");
2132 desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2133 low_addr = desc_addr.low_part;
2134 high_addr = desc_addr.high_part;
2136 tx_dma_packet->call_back = call_back;
2137 tx_dma_packet->cb_event = cb_event;
2138 tx_dma_packet->dio_req = ioreq;
2140 spin_lock_irqsave(&hw->lock, flags);
/* Pick the TX register set for the list we are about to use. */
2142 if (hw->tx_list_post_index == 0) {
2143 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2144 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
2146 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2147 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2150 *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2151 hw->tx_list_post_index;
2153 hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2155 spin_unlock_irqrestore(&hw->lock, flags);
2158 /* Insert in Active Q..*/
2159 crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2160 tx_dma_packet->list_tag);
2163 * Interrupt will come as soon as you write
2164 * the valid bit. So be ready for that. All
2165 * the initialization should happen before that.
2167 crystalhd_start_tx_dma_engine(hw);
2168 crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2170 crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part |
2172 /* Be sure we set the valid bit ^^^^ */
2174 return BC_STS_SUCCESS;
2178 * This is a force cancel and we are racing with ISR.
2180 * Will try to remove the req from ActQ before ISR gets it.
2181 * If ISR gets it first then the completion happens in the
2182 * normal path and we will return _STS_NO_DATA from here.
2184 * FIX_ME: Not Tested the actual condition..
/* Stops the TX DMA engine, then completes the identified request with
 * BC_STS_IO_USER_ABORT (no-op if the ISR already completed it). */
2186 enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw,
2189 if (!hw || !list_id) {
2190 BCMLOG_ERR("Invalid Arguments\n");
2191 return BC_STS_INV_ARG;
2194 crystalhd_stop_tx_dma_engine(hw);
2195 crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2197 return BC_STS_SUCCESS;
/*
 * Attach a user capture buffer to an RX packet: allocate a packet,
 * build its DMA descriptor chain from the ioreq SGL, and record the
 * physical address of the UV descriptor sub-chain (when the format has
 * a UV plane).  With @en_post the packet is posted to hardware
 * immediately, otherwise it is parked on rx_freeq.
 */
2200 enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2201 struct crystalhd_dio_req *ioreq, bool en_post)
2203 struct crystalhd_rx_dma_pkt *rpkt;
2204 uint32_t tag, uv_desc_ix = 0;
2207 if (!hw || !ioreq) {
2208 BCMLOG_ERR("Invalid Arguments\n");
2209 return BC_STS_INV_ARG;
2212 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2214 BCMLOG_ERR("Insufficient resources\n");
2215 return BC_STS_INSUFF_RES;
2218 rpkt->dio_req = ioreq;
2219 tag = rpkt->pkt_tag;
2221 sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem,
2223 if (sts != BC_STS_SUCCESS)
2226 rpkt->uv_phy_addr = 0;
2228 /* Store the address of UV in the rx packet for post*/
2230 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2231 (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
2234 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2236 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
/*
 * Blocking fetch of a completed capture buffer.  Waits (up to
 * BC_PROC_OUTPUT_TIMEOUT) on rx_rdyq; on success copies the picture
 * info block (when flagged valid), hands the dio_req back to the
 * caller, and recycles the RX packet.  Returns BC_STS_IO_USER_ABORT if
 * the wait was signal-interrupted, BC_STS_TIMEOUT on timeout.
 */
2241 enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2242 struct BC_PIC_INFO_BLOCK *pib,
2243 struct crystalhd_dio_req **ioreq)
2245 struct crystalhd_rx_dma_pkt *rpkt;
2246 uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2247 uint32_t sig_pending = 0;
2250 if (!hw || !ioreq || !pib) {
2251 BCMLOG_ERR("Invalid Arguments\n");
2252 return BC_STS_INV_ARG;
2255 rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2258 BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n",
2260 return BC_STS_IO_USER_ABORT;
2262 return BC_STS_TIMEOUT;
2266 rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2268 if (rpkt->flags & COMP_FLAG_PIB_VALID)
2269 memcpy(pib, &rpkt->pib, sizeof(*pib));
2271 *ioreq = rpkt->dio_req;
2273 crystalhd_hw_free_rx_pkt(hw, rpkt);
2275 return BC_STS_SUCCESS;
/*
 * Start (or resume) capture by posting one free RX buffer to each of
 * the DMA_ENGINE_CNT lists.  Returns BC_STS_NO_DATA if the free queue
 * runs dry before both lists are primed.
 */
2278 enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2280 struct crystalhd_rx_dma_pkt *rx_pkt;
2285 BCMLOG_ERR("Invalid Arguments\n");
2286 return BC_STS_INV_ARG;
2289 /* This is start of capture.. Post to both the lists.. */
2290 for (i = 0; i < DMA_ENGINE_CNT; i++) {
2291 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2293 return BC_STS_NO_DATA;
2294 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2295 if (BC_STS_SUCCESS != sts)
2300 return BC_STS_SUCCESS;
/*
 * Hard-stop capture: halt the RX DMA engine and release every packet
 * remaining on the free queue via the packet-release callback.
 */
2303 enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2308 BCMLOG_ERR("Invalid Arguments\n");
2309 return BC_STS_INV_ARG;
2312 crystalhd_stop_rx_dma_engine(hw);
2315 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2317 crystalhd_rx_pkt_rel_call_back(hw, temp);
2320 return BC_STS_SUCCESS;
/*
 * Request a capture pause.  If both RX lists are already idle the pause
 * is finalized immediately; otherwise stop_pending is left set and the
 * RX ISR finalizes it once the lists drain.
 */
2323 enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2325 hw->stats.pause_cnt++;
2326 hw->stop_pending = 1;
2328 if ((hw->rx_list_sts[0] == sts_free) &&
2329 (hw->rx_list_sts[1] == sts_free))
2330 crystalhd_hw_finalize_pause(hw);
2332 return BC_STS_SUCCESS;
/*
 * Resume capture after a pause: cancel any pending stop, disable PCIe
 * ASPM L1 (DMA is about to restart) and re-prime both RX lists.
 */
2335 enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2340 hw->stop_pending = 0;
2342 aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2343 aspm &= ~ASPM_L1_ENABLE;
2344 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2345 crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2347 sts = crystalhd_hw_start_capture(hw);
/*
 * Suspend path: put the DDR into self-refresh/sleep and then stop the
 * device.  Each step is checked and surfaced as BC_STS_ERROR on
 * failure.
 */
2351 enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2356 BCMLOG_ERR("Invalid Arguments\n");
2357 return BC_STS_INV_ARG;
2360 sts = crystalhd_put_ddr2sleep(hw);
2361 if (sts != BC_STS_SUCCESS) {
2362 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2363 return BC_STS_ERROR;
2366 if (!crystalhd_stop_device(hw->adp)) {
2367 BCMLOG_ERR("Failed to Stop Device!!\n");
2368 return BC_STS_ERROR;
2371 return BC_STS_SUCCESS;
/*
 * Snapshot driver statistics into *stats.  A NULL @stats is a request
 * to zero the internal counters instead.  Queue depths are refreshed
 * from the live free/ready queues before the copy.
 */
2374 void crystalhd_hw_stats(struct crystalhd_hw *hw,
2375 struct crystalhd_hw_stats *stats)
2378 BCMLOG_ERR("Invalid Arguments\n");
2382 /* if called w/NULL stats, its a req to zero out the stats */
2384 memset(&hw->stats, 0, sizeof(hw->stats));
2388 hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2389 hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);
2390 memcpy(stats, &hw->stats, sizeof(*stats));
/*
 * Reprogram the decoder core PLL for hw->core_clock_mhz: derive the
 * feedback divider n (clock/5), bail out with BC_STS_CLK_NOCHG if it
 * matches the previous setting or a power lock is held, adjust the
 * DRAM refresh rate for the new frequency, write PllACtl, and poll
 * (10 x 10ms) for the PLL lock bit (0x00020000).
 */
2393 enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2396 uint32_t vco_mg, refresh_reg;
2399 BCMLOG_ERR("Invalid Arguments\n");
2400 return BC_STS_INV_ARG;
2403 /* FIXME: jarod: wha? */
2404 /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2405 n = hw->core_clock_mhz/5;
2407 if (n == hw->prev_n)
2408 return BC_STS_CLK_NOCHG;
2410 if (hw->pwr_lock > 0) {
2411 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2412 return BC_STS_CLK_NOCHG;
2425 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2429 reg |= vco_mg << 12;
2431 BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2432 hw->core_clock_mhz, n, vco_mg);
2434 /* Change the DRAM refresh rate to accommodate the new frequency */
2435 /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2436 refresh_reg = (7 * hw->core_clock_mhz / 16);
2437 bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2439 bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
/* Wait for PLL lock (bit 17), up to ~100ms. */
2443 for (i = 0; i < 10; i++) {
2444 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2446 if (reg & 0x00020000) {
2448 /* FIXME: jarod: outputting
2449 a random "C" is... confusing... */
2450 BCMLOG(BCMLOG_INFO, "C");
2451 return BC_STS_SUCCESS;
2453 msleep_interruptible(10);
2456 BCMLOG(BCMLOG_INFO, "clk change failed\n");
2457 return BC_STS_CLK_NOCHG;