drivers/staging/crystalhd/crystalhd_hw.c
1 /***************************************************************************
2  * Copyright (c) 2005-2009, Broadcom Corporation.
3  *
4  *  Name: crystalhd_hw.c
5  *
6  *  Description:
7  *              BCM70010 Linux driver HW layer.
8  *
9  **********************************************************************
10  * This file is part of the crystalhd device driver.
11  *
12  * This driver is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation, version 2 of the License.
15  *
16  * This driver is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
23  **********************************************************************/
24
25 #include "crystalhd.h"
26
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30
31 /* Functions internal to this file */
32
33 static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
34 {
35         bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
36         bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
37 }
38
39
40 static void crystalhd_start_dram(struct crystalhd_adp *adp)
41 {
42         bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) <<  0) |    /* tras: (40ns tras)/(5ns period) - 1 */
43                       /* trcd term ((15/5 - 1) <<  4) is commented out here */
44                       ((15 / 5 - 1) <<  7) |    /* trp */
45                       ((10 / 5 - 1) << 10) |    /* trrd */
46                       ((15 / 5 + 1) << 12) |    /* twr */
47                       ((2 + 1) << 16) |         /* twtr */
48                       ((70 / 5 - 2) << 19) |    /* trfc */
49                       (0 << 23));
50
51         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
52         bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
53         bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
54         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
55         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
56         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
57         bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
58         /* setting the refresh rate here */
59         bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
60 }
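/*
 * For reference only (this note is an editorial addition, not part of the
 * original driver): assuming the 5 ns period used in the divisors above,
 * the packed SDRAM_PARAM expression evaluates to
 *
 *   (7 << 0) | (2 << 7) | (1 << 10) | (4 << 12) | (3 << 16) | (12 << 19)
 *     = 0x00634507
 *
 * and the refresh setting ((1 << 12) | 96) evaluates to 0x1060.  The field
 * widths are not documented here, so only the shift positions visible in
 * the code itself are relied on.
 */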
61
62
63 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
64 {
65         union link_misc_perst_deco_ctrl rst_deco_cntrl;
66         union link_misc_perst_clk_ctrl rst_clk_cntrl;
67         uint32_t temp;
68
69         /*
70          * Link clocks (MISC_PERST_CLOCK_CTRL): clear the PLL power-down bit,
71          * delay to allow the PLL to lock, then clear the alternate-clock and stop-clock bits.
72          */
73         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
74         rst_clk_cntrl.pll_pwr_dn = 0;
75         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
76         msleep_interruptible(50);
77
78         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
79         rst_clk_cntrl.stop_core_clk = 0;
80         rst_clk_cntrl.sel_alt_clk = 0;
81
82         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
83         msleep_interruptible(50);
84
85         /*
86          * Bus Arbiter Timeout: GISB_ARBITER_TIMER
87          * Set internal bus arbiter timeout to 40us based on core clock speed
88          * (63MHz * 40us = 0x9D8)
89          */
90         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
91
92         /*
93          * Decoder clocks: MISC_PERST_DECODER_CTRL
94          * Enable clocks while the 7412 reset is asserted, delay,
95          * then de-assert the 7412 reset.
96          */
97         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
98                                          MISC_PERST_DECODER_CTRL);
99         rst_deco_cntrl.stop_bcm_7412_clk = 0;
100         rst_deco_cntrl.bcm7412_rst = 1;
101         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
102                                          rst_deco_cntrl.whole_reg);
103         msleep_interruptible(10);
104
105         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
106                                          MISC_PERST_DECODER_CTRL);
107         rst_deco_cntrl.bcm7412_rst = 0;
108         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
109                                          rst_deco_cntrl.whole_reg);
110         msleep_interruptible(50);
111
112         /* Set OTP_CONTENT_MISC to 0 to disable all secure modes */
113         crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
114
115         /* Clear bit 29 of 0x404 */
116         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
117         temp &= ~BC_BIT(29);
118         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
119
120         /* 2.5V regulator must be set to 2.6 volts (+6%) */
121         /* FIXME: jarod: what's the point of this reg read? */
122         temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
123         crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
124
125         return true;
126 }
127
128 static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
129 {
130         union link_misc_perst_deco_ctrl rst_deco_cntrl;
131         union link_misc_perst_clk_ctrl  rst_clk_cntrl;
132         uint32_t                  temp;
133
134         /*
135          * Decoder clocks: MISC_PERST_DECODER_CTRL
136          * Assert 7412 reset, delay
137          * Assert 7412 stop clock
138          */
139         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
140                                          MISC_PERST_DECODER_CTRL);
141         rst_deco_cntrl.stop_bcm_7412_clk = 1;
142         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
143                                          rst_deco_cntrl.whole_reg);
144         msleep_interruptible(50);
145
146         /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
147          * Set internal bus arbiter timeout to 40us based on core clock speed
148          * (6.75MHZ * 40us = 0x10E)
149          */
150         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
151
152         /* Link clocks: MISC_PERST_CLOCK_CTRL
153          * Stop core clk, delay
154          * Set alternate clk, delay, set PLL power down
155          */
156         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
157         rst_clk_cntrl.stop_core_clk = 1;
158         rst_clk_cntrl.sel_alt_clk = 1;
159         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
160         msleep_interruptible(50);
161
162         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
163         rst_clk_cntrl.pll_pwr_dn = 1;
164         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
165
166         /*
167          * Read and restore the Transaction Configuration Register
168          * after core reset
169          */
170         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
171
172         /*
173          * Link core soft reset: MISC3_RESET_CTRL
174          * - Write BIT[0]=1 and read it back for core reset to take place
175          */
176         crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
177         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
178         msleep_interruptible(50);
179
180         /* restore the transaction configuration register */
181         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
182
183         return true;
184 }
185
186 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
187 {
188         union intr_mask_reg   intr_mask;
189         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
190         intr_mask.mask_pcie_err = 1;
191         intr_mask.mask_pcie_rbusmast_err = 1;
192         intr_mask.mask_pcie_rgr_bridge   = 1;
193         intr_mask.mask_rx_done = 1;
194         intr_mask.mask_rx_err  = 1;
195         intr_mask.mask_tx_done = 1;
196         intr_mask.mask_tx_err  = 1;
197         crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
198
199         return;
200 }
201
202 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
203 {
204         union intr_mask_reg   intr_mask;
205         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
206         intr_mask.mask_pcie_err = 1;
207         intr_mask.mask_pcie_rbusmast_err = 1;
208         intr_mask.mask_pcie_rgr_bridge   = 1;
209         intr_mask.mask_rx_done = 1;
210         intr_mask.mask_rx_err  = 1;
211         intr_mask.mask_tx_done = 1;
212         intr_mask.mask_tx_err  = 1;
213         crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
214
215         return;
216 }
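/*
 * Editorial note: the two routines above build an identical mask; writing
 * it to INTR_INTR_MSK_SET_REG masks (disables) those interrupt sources,
 * while writing it to INTR_INTR_MSK_CLR_REG unmasks (enables) them, which
 * is the only difference between crystalhd_disable_interrupts() and
 * crystalhd_enable_interrupts().
 */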
217
218 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
219 {
220         uint32_t reg;
221
222         /* FIXME: jarod: wouldn't we want to write a 0 to the reg?
223          * Or does the write clear the bits specified? */
224         reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
225         if (reg)
226                 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
227
228         reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
229         if (reg)
230                 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
231
232         reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
233         if (reg)
234                 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
235 }
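/*
 * On the FIXME above (an assumption, not confirmed by any datasheet in
 * this tree): reading a status register and writing the same value back
 * is the usual write-1-to-clear idiom, so the write most likely clears
 * exactly the error bits that were found set.
 */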
236
237 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
238 {
239         uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
240
241         if (intr_sts) {
242                 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
243
244                 /* Write End Of Interrupt for PCIE */
245                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
246         }
247 }
248
249 static void crystalhd_soft_rst(struct crystalhd_adp *adp)
250 {
251         uint32_t val;
252
253         /* Assert c011 soft reset*/
254         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
255         msleep_interruptible(50);
256
257         /* Release c011 soft reset*/
258         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
259
260         /* Disable Stuffing..*/
261         val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
262         val |= BC_BIT(8);
263         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
264 }
265
266 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
267 {
268         uint32_t i = 0, reg;
269
270         crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
271
272         crystalhd_reg_wr(adp, AES_CMD, 0);
273         crystalhd_reg_wr(adp, AES_CONFIG_INFO,
274                  (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
275         crystalhd_reg_wr(adp, AES_CMD, 0x1);
276
277         /* FIXME: jarod: I've seen this fail,
278          * and introducing extra delays helps... */
279         for (i = 0; i < 100; ++i) {
280                 reg = crystalhd_reg_rd(adp, AES_STATUS);
281                 if (reg & 0x1)
282                         return true;
283                 msleep_interruptible(10);
284         }
285
286         return false;
287 }
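/*
 * Clarifying note, inferred only from the register writes above:
 * BC_DRAM_FW_CFG_ADDR is split across two registers -- the upper bits
 * (addr >> 19) go to DCI_DRAM_BASE_ADDR and the low 19 bits
 * (addr & 0x7FFFF) to AES_CONFIG_INFO -- before AES_CMD is toggled
 * 0 -> 1 to start the operation, which is then polled for completion
 * via bit 0 of AES_STATUS for up to 100 * 10 ms.
 */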
288
289
290 static bool crystalhd_start_device(struct crystalhd_adp *adp)
291 {
292         uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
293
294         BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
295
296         reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
297         reg_pwrmgmt &= ~ASPM_L1_ENABLE;
298
299         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
300
301         if (!crystalhd_bring_out_of_rst(adp)) {
302                 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
303                 return false;
304         }
305
306         crystalhd_disable_interrupts(adp);
307
308         crystalhd_clear_errors(adp);
309
310         crystalhd_clear_interrupts(adp);
311
312         crystalhd_enable_interrupts(adp);
313
314         /* Enable the option for getting the total no. of DWORDS
315          * that have been transferred by the RXDMA engine
316          */
317         dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
318         dbg_options |= 0x10;
319         crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
320
321         /* Enable PCI Global Control options */
322         glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
323         glb_cntrl |= 0x100;
324         glb_cntrl |= 0x8000;
325         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
326
327         crystalhd_enable_interrupts(adp);
328
329         crystalhd_soft_rst(adp);
330         crystalhd_start_dram(adp);
331         crystalhd_enable_uarts(adp);
332
333         return true;
334 }
335
336 static bool crystalhd_stop_device(struct crystalhd_adp *adp)
337 {
338         uint32_t reg;
339
340         BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
341         /* Clear and disable interrupts */
342         crystalhd_disable_interrupts(adp);
343         crystalhd_clear_errors(adp);
344         crystalhd_clear_interrupts(adp);
345
346         if (!crystalhd_put_in_reset(adp))
347                 BCMLOG_ERR("Failed to Put Link To Reset State\n");
348
349         reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
350         reg |= ASPM_L1_ENABLE;
351         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
352
353         /* Set PCI Clk Req */
354         reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
355         reg |= PCI_CLK_REQ_ENABLE;
356         crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
357
358         return true;
359 }
360
361 static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(
362                                         struct crystalhd_hw *hw)
363 {
364         unsigned long flags = 0;
365         struct crystalhd_rx_dma_pkt *temp = NULL;
366
367         if (!hw)
368                 return NULL;
369
370         spin_lock_irqsave(&hw->lock, flags);
371         temp = hw->rx_pkt_pool_head;
372         if (temp) {
373                 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
374                 temp->dio_req = NULL;
375                 temp->pkt_tag = 0;
376                 temp->flags = 0;
377         }
378         spin_unlock_irqrestore(&hw->lock, flags);
379
380         return temp;
381 }
382
383 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
384                                    struct crystalhd_rx_dma_pkt *pkt)
385 {
386         unsigned long flags = 0;
387
388         if (!hw || !pkt)
389                 return;
390
391         spin_lock_irqsave(&hw->lock, flags);
392         pkt->next = hw->rx_pkt_pool_head;
393         hw->rx_pkt_pool_head = pkt;
394         spin_unlock_irqrestore(&hw->lock, flags);
395 }
396
397 /*
398  * Callback from TX IOQ deletion.
399  *
400  * This routine will release the TX DMA rings allocated
401  * by the setup-DMA-rings interface.
402  *
403  * Memory is allocated on a per-DMA-ring basis, so this is just
404  * a placeholder needed to be able to create the dio queues.
405  */
406 static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
407 {
408 }
409
410 /*
411  * Rx packet release callback.
412  *
413  * Releases all user-mapped capture buffers and returns our DMA packets
414  * to the free pool. The actual cleanup of the DMA
415  * ring descriptors happens during DMA ring release.
416  */
417 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
418 {
419         struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
420         struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;
421
422         if (!pkt || !hw) {
423                 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
424                 return;
425         }
426
427         if (pkt->dio_req)
428                 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
429         else
430                 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
431
432         crystalhd_hw_free_rx_pkt(hw, pkt);
433 }
434
435 #define crystalhd_hw_delete_ioq(adp, q)         \
436 do {                                            \
437         if (q) {                                \
438                 crystalhd_delete_dioq(adp, q);  \
439                 q = NULL;                       \
440         }                                       \
441 } while (0)
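/*
 * Usage note (illustrative, not additional driver code): the macro takes
 * the queue pointer as an lvalue, so a call such as
 *
 *   crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
 *
 * both deletes the dioq and sets hw->tx_actq to NULL, leaving the pointer
 * safe to test or to pass through the macro again.  That is presumably
 * why this is a macro rather than a function.
 */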
442
443 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
444 {
445         if (!hw)
446                 return;
447
448         BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
449         crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
450         crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
451         crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
452         crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
453         crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
454 }
455
456 #define crystalhd_hw_create_ioq(sts, hw, q, cb)                 \
457 do {                                                            \
458         sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);       \
459         if (sts != BC_STS_SUCCESS)                              \
460                 goto hw_create_ioq_err;                         \
461 } while (0)
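/*
 * Editorial note: on failure this macro expands to a goto targeting the
 * hw_create_ioq_err label, so it can only be used inside a function that
 * defines that label -- in this file, only crystalhd_hw_create_ioqs()
 * below.
 */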
462
463 /*
464  * Create IOQs..
465  *
466  * TX - Active & Free
467  * RX - Active, Ready and Free.
468  */
469 static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw   *hw)
470 {
471         enum BC_STATUS   sts = BC_STS_SUCCESS;
472
473         if (!hw) {
474                 BCMLOG_ERR("Invalid Arg!!\n");
475                 return BC_STS_INV_ARG;
476         }
477
478         crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
479                               crystalhd_tx_desc_rel_call_back);
480         crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
481                               crystalhd_tx_desc_rel_call_back);
482
483         crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
484                               crystalhd_rx_pkt_rel_call_back);
485         crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
486                               crystalhd_rx_pkt_rel_call_back);
487         crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
488                               crystalhd_rx_pkt_rel_call_back);
489
490         return sts;
491
492 hw_create_ioq_err:
493         crystalhd_hw_delete_ioqs(hw);
494
495         return sts;
496 }
497
498
499 static bool crystalhd_code_in_full(struct crystalhd_adp *adp,
500                  uint32_t needed_sz, bool b_188_byte_pkts,  uint8_t flags)
501 {
502         uint32_t base, end, writep, readp;
503         uint32_t cpbSize, cpbFullness, fifoSize;
504
505         if (flags & 0x02) { /* ASF Bit is set */
506                 base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
507                 end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
508                 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
509                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
510         } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
511                 base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
512                 end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
513                 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
514                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
515         } else {
516                 base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
517                 end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
518                 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
519                 readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
520         }
521
522         cpbSize = end - base;
523         if (writep >= readp)
524                 cpbFullness = writep - readp;
525         else
526                 cpbFullness = (end - base) - (readp - writep);
527
528         fifoSize = cpbSize - cpbFullness;
529
530         if (fifoSize < BC_INFIFO_THRESHOLD)
531                 return true;
532
533         if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
534                 return true;
535
536         return false;
537 }
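/*
 * Worked example of the fullness math above, using made-up register
 * values: with base = 0x1000, end = 0x5000, readp = 0x4000 and
 * writep = 0x1800 (so writep < readp),
 *
 *   cpbSize     = end - base = 0x4000
 *   cpbFullness = (end - base) - (readp - writep) = 0x4000 - 0x2800 = 0x1800
 *   fifoSize    = cpbSize - cpbFullness = 0x2800
 *
 * and the coded-in buffer is reported full only when fifoSize drops below
 * BC_INFIFO_THRESHOLD or cannot hold needed_sz on top of that threshold.
 */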
538
539 static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
540                                          uint32_t list_id, enum BC_STATUS cs)
541 {
542         struct tx_dma_pkt *tx_req;
543
544         if (!hw || !list_id) {
545                 BCMLOG_ERR("Invalid Arg..\n");
546                 return BC_STS_INV_ARG;
547         }
548
549         hw->pwr_lock--;
550
551         tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(
552                                         hw->tx_actq, list_id);
553         if (!tx_req) {
554                 if (cs != BC_STS_IO_USER_ABORT)
555                         BCMLOG_ERR("Find and Fetch Did not find req\n");
556                 return BC_STS_NO_DATA;
557         }
558
559         if (tx_req->call_back) {
560                 tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
561                 tx_req->dio_req   = NULL;
562                 tx_req->cb_event  = NULL;
563                 tx_req->call_back = NULL;
564         } else {
565                 BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
566                        tx_req->list_tag);
567         }
568
569         /* Now put the tx_list back in the FreeQ */
570         tx_req->list_tag = 0;
571
572         return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
573 }
574
575 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw,
576                                          uint32_t err_sts)
577 {
578         uint32_t err_mask, tmp;
579         unsigned long flags = 0;
580
581         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
582                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
583                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
584
585         if (!(err_sts & err_mask))
586                 return false;
587
588         BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);
589
590         tmp = err_mask;
591
592         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
593                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
594
595         if (tmp) {
596                 spin_lock_irqsave(&hw->lock, flags);
597                 /* reset list index.*/
598                 hw->tx_list_post_index = 0;
599                 spin_unlock_irqrestore(&hw->lock, flags);
600         }
601
602         tmp = err_sts & err_mask;
603         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
604
605         return true;
606 }
607
608 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw,
609                                          uint32_t err_sts)
610 {
611         uint32_t err_mask, tmp;
612         unsigned long flags = 0;
613
614         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
615                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
616                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
617
618         if (!(err_sts & err_mask))
619                 return false;
620
621         BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);
622
623         tmp = err_mask;
624
625         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
626                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
627
628         if (tmp) {
629                 spin_lock_irqsave(&hw->lock, flags);
630                 /* reset list index.*/
631                 hw->tx_list_post_index = 0;
632                 spin_unlock_irqrestore(&hw->lock, flags);
633         }
634
635         tmp = err_sts & err_mask;
636         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
637
638         return true;
639 }
640
641 static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
642 {
643         uint32_t err_sts;
644
645         if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
646                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
647                                            BC_STS_SUCCESS);
648
649         if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
650                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
651                                            BC_STS_SUCCESS);
652
653         if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
654                         INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
655                         /* No error mask set.. */
656                         return;
657         }
658
659         /* Handle Tx errors. */
660         err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
661
662         if (crystalhd_tx_list0_handler(hw, err_sts))
663                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
664                                            BC_STS_ERROR);
665
666         if (crystalhd_tx_list1_handler(hw, err_sts))
667                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
668                                            BC_STS_ERROR);
669
670         hw->stats.tx_errors++;
671 }
672
673 static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
674                                  uint32_t ul_desc_index, uint32_t cnt)
675 {
676         uint32_t ix, ll = 0;
677
678         if (!p_dma_desc || !cnt)
679                 return;
680
681         /* FIXME: jarod: perhaps a modparam desc_debug to enable this,
682          rather than setting ll (log level, I presume) to non-zero? */
683         if (!ll)
684                 return;
685
686         for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
687                 BCMLOG(ll,
688                  "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
689                  ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
690                        ix,
691                        p_dma_desc[ix].buff_addr_high,
692                        p_dma_desc[ix].buff_addr_low,
693                        p_dma_desc[ix].next_desc_addr_high,
694                        p_dma_desc[ix].next_desc_addr_low,
695                        p_dma_desc[ix].xfer_size,
696                        p_dma_desc[ix].intr_enable,
697                        p_dma_desc[ix].last_rec_indicator);
698         }
699
700 }
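/*
 * One possible way to address the FIXME above (an illustrative sketch
 * only; "desc_debug" is a hypothetical name, not an existing parameter):
 *
 *   static bool desc_debug;
 *   module_param(desc_debug, bool, 0644);
 *   MODULE_PARM_DESC(desc_debug, "Dump DMA descriptors as they are built");
 *
 * with "ll = desc_debug ? BCMLOG_DBG : 0;" replacing the hard-coded
 * "ll = 0" above, so descriptor dumping could be enabled at module load
 * time or via sysfs instead of requiring a recompile.
 */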
701
702 static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
703                                       struct dma_descriptor *desc,
704                                       dma_addr_t desc_paddr_base,
705                                       uint32_t sg_cnt, uint32_t sg_st_ix,
706                                       uint32_t sg_st_off, uint32_t xfr_sz)
707 {
708         uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
709         dma_addr_t desc_phy_addr = desc_paddr_base;
710         union addr_64 addr_temp;
711
712         if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
713             (!sg_cnt && !ioreq->uinfo.dir_tx)) {
714                 BCMLOG_ERR("Invalid Args\n");
715                 return BC_STS_INV_ARG;
716         }
717
718         for (ix = 0; ix < sg_cnt; ix++) {
719
720                 /* Setup SGLE index. */
721                 sg_ix = ix + sg_st_ix;
722
723                 /* Get SGLE length */
724                 len = crystalhd_get_sgle_len(ioreq, sg_ix);
725                 if (len % 4) {
726                         BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix,
727                                  sg_cnt);
728                         return BC_STS_NOT_IMPL;
729                 }
730                 /* Setup DMA desc with Phy addr & Length at current index. */
731                 addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
732                 if (sg_ix == sg_st_ix) {
733                         addr_temp.full_addr += sg_st_off;
734                         len -= sg_st_off;
735                 }
736                 memset(&desc[ix], 0, sizeof(desc[ix]));
737                 desc[ix].buff_addr_low  = addr_temp.low_part;
738                 desc[ix].buff_addr_high = addr_temp.high_part;
739                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
740
741                 /* Chain DMA descriptor.  */
742                 addr_temp.full_addr = desc_phy_addr +
743                                          sizeof(struct dma_descriptor);
744                 desc[ix].next_desc_addr_low = addr_temp.low_part;
745                 desc[ix].next_desc_addr_high = addr_temp.high_part;
746
747                 if ((count + len) > xfr_sz)
748                         len = xfr_sz - count;
749
750                 /* Debug.. */
751                 if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
752                         BCMLOG_ERR(
753                          "inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
754                          len, ix, count, xfr_sz, sg_cnt);
755                         return BC_STS_ERROR;
756                 }
757                 /* xfer_size is in DWORDs, so the length must be a multiple of 4 */
758                 desc[ix].xfer_size = (len / 4);
759
760                 crystalhd_hw_dump_desc(desc, ix, 1);
761
762                 count += len;
763                 desc_phy_addr += sizeof(struct dma_descriptor);
764         }
765
766         last_desc_ix = ix - 1;
767
768         if (ioreq->fb_size) {
769                 memset(&desc[ix], 0, sizeof(desc[ix]));
770                 addr_temp.full_addr     = ioreq->fb_pa;
771                 desc[ix].buff_addr_low  = addr_temp.low_part;
772                 desc[ix].buff_addr_high = addr_temp.high_part;
773                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
774                 desc[ix].xfer_size      = 1;
775                 desc[ix].fill_bytes     = 4 - ioreq->fb_size;
776                 count += ioreq->fb_size;
777                 last_desc_ix++;
778         }
779
780         /* setup last descriptor..*/
781         desc[last_desc_ix].last_rec_indicator  = 1;
782         desc[last_desc_ix].next_desc_addr_low  = 0;
783         desc[last_desc_ix].next_desc_addr_high = 0;
784         desc[last_desc_ix].intr_enable = 1;
785
786         crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
787
788         if (count != xfr_sz) {
789                 BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
790                 return BC_STS_ERROR;
791         }
792
793         return BC_STS_SUCCESS;
794 }
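/*
 * Summary note (editorial): each scatter/gather element becomes one DMA
 * descriptor, chained through next_desc_addr_{low,high}; the final
 * descriptor has its next pointer cleared and last_rec_indicator plus
 * intr_enable set.  When the request carries a sub-DWORD remainder
 * (ioreq->fb_size), one extra single-DWORD descriptor pointing at
 * ioreq->fb_pa is appended with fill_bytes = 4 - fb_size to pad the
 * transfer out to a 4-byte boundary.
 */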
795
796 static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(
797                                               struct crystalhd_dio_req *ioreq,
798                                               struct dma_desc_mem *pdesc_mem,
799                                               uint32_t *uv_desc_index)
800 {
801         struct dma_descriptor *desc = NULL;
802         dma_addr_t desc_paddr_base = 0;
803         uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
804         uint32_t xfr_sz = 0;
805         enum BC_STATUS sts = BC_STS_SUCCESS;
806
807         /* Check params.. */
808         if (!ioreq || !pdesc_mem || !uv_desc_index) {
809                 BCMLOG_ERR("Invalid Args\n");
810                 return BC_STS_INV_ARG;
811         }
812
813         if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
814             !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
815                 BCMLOG_ERR("Invalid Args\n");
816                 return BC_STS_INV_ARG;
817         }
818
819         if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
820                 BCMLOG_ERR("UV offset for TX??\n");
821                 return BC_STS_INV_ARG;
822
823         }
824
825         desc = pdesc_mem->pdma_desc_start;
826         desc_paddr_base = pdesc_mem->phy_addr;
827
828         if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
829                 sg_cnt = ioreq->sg_cnt;
830                 xfr_sz = ioreq->uinfo.xfr_len;
831         } else {
832                 sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
833                 xfr_sz = ioreq->uinfo.uv_offset;
834         }
835
836         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
837                                    sg_st_ix, sg_st_off, xfr_sz);
838
839         if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
840                 return sts;
841
842         /* Prepare for UV mapping.. */
843         desc = &pdesc_mem->pdma_desc_start[sg_cnt];
844         desc_paddr_base = pdesc_mem->phy_addr +
845                           (sg_cnt * sizeof(struct dma_descriptor));
846
847         /* Done with desc addr.. now update sg stuff.*/
848         sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
849         xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
850         sg_st_ix  = ioreq->uinfo.uv_sg_ix;
851         sg_st_off = ioreq->uinfo.uv_sg_off;
852
853         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
854                                    sg_st_ix, sg_st_off, xfr_sz);
855         if (sts != BC_STS_SUCCESS)
856                 return sts;
857
858         *uv_desc_index = sg_st_ix;
859
860         return sts;
861 }
862
863 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
864 {
865         uint32_t dma_cntrl;
866
867         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
868         if (!(dma_cntrl & DMA_START_BIT)) {
869                 dma_cntrl |= DMA_START_BIT;
870                 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
871                                dma_cntrl);
872         }
873
874         return;
875 }
876
877 /* _CHECK_THIS_
878  *
879  * Verify whether the Stop generates a completion interrupt.
880  * If it does not, then add polling here.
881  */
882 static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
883 {
884         uint32_t dma_cntrl, cnt = 30;
885         uint32_t l1 = 1, l2 = 1;
886         unsigned long flags = 0;
887
888         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
889
890         BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
891
892         if (!(dma_cntrl & DMA_START_BIT)) {
893                 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
894                 return BC_STS_SUCCESS;
895         }
896
897         crystalhd_disable_interrupts(hw->adp);
898
899         /* Issue stop to HW */
900         /* This bit gave problems when set. Please check. */
901         dma_cntrl &= ~DMA_START_BIT;
902         crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
903
904         BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
905
906         /* Poll for 3 seconds (30 * 100 ms) on both lists. */
907         while ((l1 || l2) && cnt) {
908
909                 if (l1) {
910                         l1 = crystalhd_reg_rd(hw->adp,
911                                  MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
912                         l1 &= DMA_START_BIT;
913                 }
914
915                 if (l2) {
916                         l2 = crystalhd_reg_rd(hw->adp,
917                                  MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
918                         l2 &= DMA_START_BIT;
919                 }
920
921                 msleep_interruptible(100);
922
923                 cnt--;
924         }
925
926         if (!cnt) {
927                 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
928                 crystalhd_enable_interrupts(hw->adp);
929                 return BC_STS_ERROR;
930         }
931
932         spin_lock_irqsave(&hw->lock, flags);
933         hw->tx_list_post_index = 0;
934         spin_unlock_irqrestore(&hw->lock, flags);
935         BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
936         crystalhd_enable_interrupts(hw->adp);
937
938         return BC_STS_SUCCESS;
939 }
940
941 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
942 {
943         /*
944          * The read and write offsets of the circular PIB delivery list
945          * are stored at its 0th and 1st locations, respectively.
946         */
947         uint32_t Q_addr;
948         uint32_t pib_cnt, r_offset, w_offset;
949
950         Q_addr = hw->pib_del_Q_addr;
951
952         /* Get the Read Pointer */
953         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
954
955         /* Get the Write Pointer */
956         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
957
958         if (r_offset == w_offset)
959                 return 0;       /* Queue is empty */
960
961         if (w_offset > r_offset)
962                 pib_cnt = w_offset - r_offset;
963         else
964                 pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
965                           (r_offset + MIN_PIB_Q_DEPTH);
966
967         if (pib_cnt > MAX_PIB_Q_DEPTH) {
968                 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
969                 return 0;
970         }
971
972         return pib_cnt;
973 }
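/*
 * Layout note, derived from the accesses in this helper and the two that
 * follow (no separate spec is available here): each PIB queue in DRAM is
 * an array of 32-bit words -- word 0 holds the read offset, word 1 the
 * write offset, and words MIN_PIB_Q_DEPTH..MAX_PIB_Q_DEPTH-1 hold the
 * actual PIB DRAM addresses.  Offsets wrap from MAX_PIB_Q_DEPTH back to
 * MIN_PIB_Q_DEPTH, and the queue is empty when the two offsets are equal.
 */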
974
975 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
976 {
977         uint32_t Q_addr;
978         uint32_t addr_entry, r_offset, w_offset;
979
980         Q_addr = hw->pib_del_Q_addr;
981
982         /* Get the read pointer (stored at the 0th location) */
983         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
984
985         /* Get the write pointer (stored at the 1st location) */
986         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
987
988         /* Queue is empty */
989         if (r_offset == w_offset)
990                 return 0;
991
992         if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
993                 return 0;
994
995         /* Get the Actual Address of the PIB */
996         crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
997                        1, &addr_entry);
998
999         /* Increment the Read Pointer */
1000         r_offset++;
1001
1002         if (MAX_PIB_Q_DEPTH == r_offset)
1003                 r_offset = MIN_PIB_Q_DEPTH;
1004
1005         /* Write back the read pointer to It's Location */
1006         crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
1007
1008         return addr_entry;
1009 }
1010
1011 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw,
1012                                          uint32_t addr_to_rel)
1013 {
1014         uint32_t Q_addr;
1015         uint32_t r_offset, w_offset, n_offset;
1016
1017         Q_addr = hw->pib_rel_Q_addr;
1018
1019         /* Get the Read Pointer */
1020         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
1021
1022         /* Get the Write Pointer */
1023         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1024
1025         if ((r_offset < MIN_PIB_Q_DEPTH) ||
1026             (r_offset >= MAX_PIB_Q_DEPTH))
1027                 return false;
1028
1029         n_offset = w_offset + 1;
1030
1031         if (MAX_PIB_Q_DEPTH == n_offset)
1032                 n_offset = MIN_PIB_Q_DEPTH;
1033
1034         if (r_offset == n_offset)
1035                 return false; /* should never happen */
1036
1037         /* Write the DRAM address into the queue at the current write offset */
1038         crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1039                        1, &addr_to_rel);
1040
1041         /* Put the New value of the write pointer in Queue */
1042         crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1043
1044         return true;
1045 }
1046
1047 static void cpy_pib_to_app(struct c011_pib *src_pib,
1048                                          struct BC_PIC_INFO_BLOCK *dst_pib)
1049 {
1050         if (!src_pib || !dst_pib) {
1051                 BCMLOG_ERR("Invalid Arguments\n");
1052                 return;
1053         }
1054
1055         dst_pib->timeStamp           = 0;
1056         dst_pib->picture_number      = src_pib->ppb.picture_number;
1057         dst_pib->width               = src_pib->ppb.width;
1058         dst_pib->height              = src_pib->ppb.height;
1059         dst_pib->chroma_format       = src_pib->ppb.chroma_format;
1060         dst_pib->pulldown            = src_pib->ppb.pulldown;
1061         dst_pib->flags               = src_pib->ppb.flags;
1062         dst_pib->sess_num            = src_pib->ptsStcOffset;
1063         dst_pib->aspect_ratio        = src_pib->ppb.aspect_ratio;
1064         dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
1065         dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1066         dst_pib->frame_rate             = src_pib->resolution;
1067         return;
1068 }
1069
1070 static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1071 {
1072         unsigned int cnt;
1073         struct c011_pib src_pib;
1074         uint32_t pib_addr, pib_cnt;
1075         struct BC_PIC_INFO_BLOCK *AppPib;
1076         struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1077
1078         pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1079
1080         if (!pib_cnt)
1081                 return;
1082
1083         for (cnt = 0; cnt < pib_cnt; cnt++) {
1084
1085                 pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1086                 crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
1087                                (uint32_t *)&src_pib);
1088
1089                 if (src_pib.bFormatChange) {
1090                         rx_pkt = (struct crystalhd_rx_dma_pkt *)
1091                                         crystalhd_dioq_fetch(hw->rx_freeq);
1092                         if (!rx_pkt)
1093                                 return;
1094                         rx_pkt->flags = 0;
1095                         rx_pkt->flags |= COMP_FLAG_PIB_VALID |
1096                                          COMP_FLAG_FMT_CHANGE;
1097                         AppPib = &rx_pkt->pib;
1098                         cpy_pib_to_app(&src_pib, AppPib);
1099
1100                         BCMLOG(BCMLOG_DBG,
1101                                "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1102                                rx_pkt->pib.picture_number,
1103                                rx_pkt->pib.aspect_ratio,
1104                                rx_pkt->pib.chroma_format,
1105                                rx_pkt->pib.colour_primaries,
1106                                rx_pkt->pib.frame_rate,
1107                                rx_pkt->pib.height,
1108                                rx_pkt->pib.width,
1109                                rx_pkt->pib.n_drop,
1110                                rx_pkt->pib.pulldown,
1111                                rx_pkt->pib.ycom);
1112
1113                         crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true,
1114                                          rx_pkt->pkt_tag);
1115
1116                 }
1117
1118                 crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1119         }
1120 }
1121
1122 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1123 {
1124         uint32_t        dma_cntrl;
1125
1126         dma_cntrl = crystalhd_reg_rd(hw->adp,
1127                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1128         if (!(dma_cntrl & DMA_START_BIT)) {
1129                 dma_cntrl |= DMA_START_BIT;
1130                 crystalhd_reg_wr(hw->adp,
1131                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1132         }
1133
1134         dma_cntrl = crystalhd_reg_rd(hw->adp,
1135                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1136         if (!(dma_cntrl & DMA_START_BIT)) {
1137                 dma_cntrl |= DMA_START_BIT;
1138                 crystalhd_reg_wr(hw->adp,
1139                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1140         }
1141
1142         return;
1143 }
1144
1145 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1146 {
1147         uint32_t dma_cntrl = 0, count = 30;
1148         uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1149
1150         dma_cntrl = crystalhd_reg_rd(hw->adp,
1151                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1152         if ((dma_cntrl & DMA_START_BIT)) {
1153                 dma_cntrl &= ~DMA_START_BIT;
1154                 crystalhd_reg_wr(hw->adp,
1155                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1156         }
1157
1158         dma_cntrl = crystalhd_reg_rd(hw->adp,
1159                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1160         if ((dma_cntrl & DMA_START_BIT)) {
1161                 dma_cntrl &= ~DMA_START_BIT;
1162                 crystalhd_reg_wr(hw->adp,
1163                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1164         }
1165
1166         /* Poll for 3 seconds (30 * 100 ms) on both lists. */
1167         while ((l0y || l0uv || l1y || l1uv) && count) {
1168
1169                 if (l0y) {
1170                         l0y = crystalhd_reg_rd(hw->adp,
1171                                  MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1172                         l0y &= DMA_START_BIT;
1173                         if (!l0y)
1174                                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1175                 }
1176
1177                 if (l1y) {
1178                         l1y = crystalhd_reg_rd(hw->adp,
1179                                  MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1180                         l1y &= DMA_START_BIT;
1181                         if (!l1y)
1182                                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1183                 }
1184
1185                 if (l0uv) {
1186                         l0uv = crystalhd_reg_rd(hw->adp,
1187                                  MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1188                         l0uv &= DMA_START_BIT;
1189                         if (!l0uv)
1190                                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1191                 }
1192
1193                 if (l1uv) {
1194                         l1uv = crystalhd_reg_rd(hw->adp,
1195                                  MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1196                         l1uv &= DMA_START_BIT;
1197                         if (!l1uv)
1198                                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1199                 }
1200                 msleep_interruptible(100);
1201                 count--;
1202         }
1203
1204         hw->rx_list_post_index = 0;
1205
1206         BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1207                count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
1208 }
1209
1210 static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw,
1211                                          struct crystalhd_rx_dma_pkt *rx_pkt)
1212 {
1213         uint32_t y_low_addr_reg, y_high_addr_reg;
1214         uint32_t uv_low_addr_reg, uv_high_addr_reg;
1215         union addr_64 desc_addr;
1216         unsigned long flags;
1217
1218         if (!hw || !rx_pkt) {
1219                 BCMLOG_ERR("Invalid Arguments\n");
1220                 return BC_STS_INV_ARG;
1221         }
1222
1223         if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1224                 BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1225                 return BC_STS_INV_ARG;
1226         }
1227
1228         spin_lock_irqsave(&hw->rx_lock, flags);
1229         /* FIXME: jarod: sts_free is an enum for 0,
1230          * in crystalhd_hw.h... yuk... */
1231         if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
1232                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1233                 return BC_STS_BUSY;
1234         }
1235
1236         if (!hw->rx_list_post_index) {
1237                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1238                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1239                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1240                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
1241         } else {
1242                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1243                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1244                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1245                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
1246         }
1247         rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1248         hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1249         if (rx_pkt->uv_phy_addr)
1250                 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1251         hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1252         spin_unlock_irqrestore(&hw->rx_lock, flags);
1253
1254         crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false,
1255                          rx_pkt->pkt_tag);
1256
1257         crystalhd_start_rx_dma_engine(hw);
1258         /* Program the Y descriptor */
1259         desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1260         crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
1261         crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1262
1263         if (rx_pkt->uv_phy_addr) {
1264                 /* Program the UV descriptor */
1265                 desc_addr.full_addr = rx_pkt->uv_phy_addr;
1266                 crystalhd_reg_wr(hw->adp, uv_high_addr_reg,
1267                          desc_addr.high_part);
1268                 crystalhd_reg_wr(hw->adp, uv_low_addr_reg,
1269                          desc_addr.low_part | 0x01);
1270         }
1271
1272         return BC_STS_SUCCESS;
1273 }
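/*
 * Note on the "| 0x01" above (an inference from this code, not from any
 * documentation): OR-ing bit 0 into the low descriptor address written to
 * the *_FIRST_DESC_L_ADDR registers appears to act as the list start/valid
 * flag for that DMA list; the stop paths above poll the same registers for
 * DMA_START_BIT to tell when the engine is finished with the list.
 */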
1274
1275 static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1276                                           struct crystalhd_rx_dma_pkt *rx_pkt)
1277 {
1278         enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1279
1280         if (sts == BC_STS_BUSY)
1281                 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1282                                  false, rx_pkt->pkt_tag);
1283
1284         return sts;
1285 }
1286
1287 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1288                              uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1289 {
1290         uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1291
1292         if (!list_index) {
1293                 y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1294                 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1295         } else {
1296                 y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1297                 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1298         }
1299
1300         *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1301         *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1302 }
1303
1304 /*
1305  * This function should be called only after making sure that the two DMA
1306  * lists are free. This function does not check whether the DMAs are active
1307  * before turning them off.
1308  */
1309 static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1310 {
1311         uint32_t dma_cntrl, aspm;
1312
1313         hw->stop_pending = 0;
1314
1315         dma_cntrl = crystalhd_reg_rd(hw->adp,
1316                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1317         if (dma_cntrl & DMA_START_BIT) {
1318                 dma_cntrl &= ~DMA_START_BIT;
1319                 crystalhd_reg_wr(hw->adp,
1320                          MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1321         }
1322
1323         dma_cntrl = crystalhd_reg_rd(hw->adp,
1324                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1325         if (dma_cntrl & DMA_START_BIT) {
1326                 dma_cntrl &= ~DMA_START_BIT;
1327                 crystalhd_reg_wr(hw->adp,
1328                          MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1329         }
1330         hw->rx_list_post_index = 0;
1331
1332         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1333         aspm |= ASPM_L1_ENABLE;
1334         /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1335         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
1336 }
1337
1338 static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw,
1339                          uint32_t list_index, enum BC_STATUS comp_sts)
1340 {
1341         struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1342         uint32_t y_dw_dnsz, uv_dw_dnsz;
1343         enum BC_STATUS sts = BC_STS_SUCCESS;
1344
1345         if (!hw || list_index >= DMA_ENGINE_CNT) {
1346                 BCMLOG_ERR("Invalid Arguments\n");
1347                 return BC_STS_INV_ARG;
1348         }
1349
1350         rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1351                                              hw->rx_pkt_tag_seed + list_index);
1352         if (!rx_pkt) {
1353                 BCMLOG_ERR(
1354                 "Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1355                            hw->rx_list_post_index, hw->rx_list_sts[0],
1356                            hw->rx_list_sts[1], list_index,
1357                            hw->rx_pkt_tag_seed + list_index, comp_sts);
1358                 return BC_STS_INV_ARG;
1359         }
1360
1361         if (comp_sts == BC_STS_SUCCESS) {
1362                 crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1363                 rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1364                 rx_pkt->flags = COMP_FLAG_DATA_VALID;
1365                 if (rx_pkt->uv_phy_addr)
1366                         rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
1367                 crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1368                                 hw->rx_pkt_tag_seed + list_index);
1369                 return sts;
1370         }
1371
1372         /* Check if we can post this DIO again. */
1373         return crystalhd_hw_post_cap_buff(hw, rx_pkt);
1374 }
1375
1376 static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
1377                  uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
1378 {
1379         uint32_t tmp;
1380         enum list_sts tmp_lsts;
1381
1382         if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1383                 return false;
1384
1385         tmp_lsts = hw->rx_list_sts[0];
1386
1387         /* Y0 - DMA */
1388         tmp = y_err_sts & GET_Y0_ERR_MSK;
1389         if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1390                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1391
1392         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1393                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1394                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1395         }
1396
1397         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1398                 hw->rx_list_sts[0] &= ~rx_y_mask;
1399                 hw->rx_list_sts[0] |= rx_y_error;
1400                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1401         }
1402
1403         if (tmp) {
1404                 hw->rx_list_sts[0] &= ~rx_y_mask;
1405                 hw->rx_list_sts[0] |= rx_y_error;
1406                 hw->rx_list_post_index = 0;
1407         }
1408
1409         /* UV0 - DMA */
1410         tmp = uv_err_sts & GET_UV0_ERR_MSK;
1411         if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1412                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1413
1414         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1415                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1416                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1417         }
1418
1419         if (uv_err_sts &
1420          MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1421                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1422                 hw->rx_list_sts[0] |= rx_uv_error;
1423                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1424         }
1425
1426         if (tmp) {
1427                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1428                 hw->rx_list_sts[0] |= rx_uv_error;
1429                 hw->rx_list_post_index = 0;
1430         }
1431
1432         if (y_err_sts & GET_Y0_ERR_MSK) {
1433                 tmp = y_err_sts & GET_Y0_ERR_MSK;
1434                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1435         }
1436
1437         if (uv_err_sts & GET_UV0_ERR_MSK) {
1438                 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1439                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1440         }
1441
1442         return tmp_lsts != hw->rx_list_sts[0];
1443 }
1444
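/* Same as crystalhd_rx_list0_handler, applied to RX DMA list 1. */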
1445 static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
1446                  uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
1447 {
1448         uint32_t tmp;
1449         enum list_sts tmp_lsts;
1450
1451         if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1452                 return false;
1453
1454         tmp_lsts = hw->rx_list_sts[1];
1455
1456         /* Y1 - DMA */
1457         tmp = y_err_sts & GET_Y1_ERR_MSK;
1458         if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1459                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1460
1461         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1462                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1463                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1464         }
1465
1466         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1467                 /* Add retry-support..*/
1468                 hw->rx_list_sts[1] &= ~rx_y_mask;
1469                 hw->rx_list_sts[1] |= rx_y_error;
1470                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1471         }
1472
1473         if (tmp) {
1474                 hw->rx_list_sts[1] &= ~rx_y_mask;
1475                 hw->rx_list_sts[1] |= rx_y_error;
1476                 hw->rx_list_post_index = 0;
1477         }
1478
1479         /* UV1 - DMA */
1480         tmp = uv_err_sts & GET_UV1_ERR_MSK;
1481         if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
1482                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1483
1484         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1485                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1486                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1487         }
1488
1489         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1490                 /* Add retry-support*/
1491                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1492                 hw->rx_list_sts[1] |= rx_uv_error;
1493                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1494         }
1495
1496         if (tmp) {
1497                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1498                 hw->rx_list_sts[1] |= rx_uv_error;
1499                 hw->rx_list_post_index = 0;
1500         }
1501
1502         if (y_err_sts & GET_Y1_ERR_MSK) {
1503                 tmp = y_err_sts & GET_Y1_ERR_MSK;
1504                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1505         }
1506
1507         if (uv_err_sts & GET_UV1_ERR_MSK) {
1508                 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1509                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1510         }
1511
1512         return tmp_lsts != hw->rx_list_sts[1];
1513 }
1514
1515
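/*
 * crystalhd_rx_isr - service the RX portion of a device interrupt.
 *
 * Reads the Y/UV RX error status registers once, then runs the per-list
 * handlers under rx_lock. A list that has gone back to sts_free completes
 * its packet with BC_STS_SUCCESS; an error state bumps stats.rx_errors and
 * completes with BC_STS_ERROR. Once a list is available again we either
 * finalize a pending pause (both lists free) or re-arm capture.
 */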
1516 static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1517 {
1518         unsigned long flags;
1519         uint32_t i, list_avail = 0;
1520         enum BC_STATUS comp_sts = BC_STS_NO_DATA;
1521         uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1522         bool ret = false;
1523
1524         if (!hw) {
1525                 BCMLOG_ERR("Invalid Arguments\n");
1526                 return;
1527         }
1528
1529         if (!(intr_sts & GET_RX_INTR_MASK))
1530                 return;
1531
1532         y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1533         uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1534
1535         for (i = 0; i < DMA_ENGINE_CNT; i++) {
1536                 /* Update States..*/
1537                 spin_lock_irqsave(&hw->rx_lock, flags);
1538                 if (i == 0)
1539                         ret = crystalhd_rx_list0_handler(hw, intr_sts,
1540                                          y_err_sts, uv_err_sts);
1541                 else
1542                         ret = crystalhd_rx_list1_handler(hw, intr_sts,
1543                                          y_err_sts, uv_err_sts);
1544                 if (ret) {
1545                         switch (hw->rx_list_sts[i]) {
1546                         case sts_free:
1547                                 comp_sts = BC_STS_SUCCESS;
1548                                 list_avail = 1;
1549                                 break;
1550                         case rx_y_error:
1551                         case rx_uv_error:
1552                         case rx_sts_error:
1553                                 /* We got an error on Y, UV, or both. */
1554                                 hw->stats.rx_errors++;
1555                                 crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1556                                 /* FIXME: jarod: this is where
1557                                  my mini pci-e card is tripping up */
1558                                 BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1559                                        i, hw->stats.rx_errors, y_err_sts,
1560                                        uv_err_sts, intr_sts, y_dn_sz,
1561                                        uv_dn_sz);
1562                                 hw->rx_list_sts[i] = sts_free;
1563                                 comp_sts = BC_STS_ERROR;
1564                                 break;
1565                         default:
1566                                 /* Wait for completion..*/
1567                                 comp_sts = BC_STS_NO_DATA;
1568                                 break;
1569                         }
1570                 }
1571                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1572
1573                 /* handle completion...*/
1574                 if (comp_sts != BC_STS_NO_DATA) {
1575                         crystalhd_rx_pkt_done(hw, i, comp_sts);
1576                         comp_sts = BC_STS_NO_DATA;
1577                 }
1578         }
1579
1580         if (list_avail) {
1581                 if (hw->stop_pending) {
1582                         if ((hw->rx_list_sts[0] == sts_free) &&
1583                             (hw->rx_list_sts[1] == sts_free))
1584                                 crystalhd_hw_finalize_pause(hw);
1585                 } else {
1586                         crystalhd_hw_start_capture(hw);
1587                 }
1588         }
1589 }
1590
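/*
 * crystalhd_fw_cmd_post_proc - post-process a completed firmware command.
 *
 * A channel-start-video response supplies the picture-info delivery and
 * release queue addresses, which are cached in the hw context. After the
 * init command the firmware configuration is loaded; failure is reported
 * as BC_STS_FW_AUTH_FAILED.
 */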
1591 static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1592                                           struct BC_FW_CMD *fw_cmd)
1593 {
1594         enum BC_STATUS sts = BC_STS_SUCCESS;
1595         struct dec_rsp_channel_start_video *st_rsp = NULL;
1596
1597         switch (fw_cmd->cmd[0]) {
1598         case eCMD_C011_DEC_CHAN_START_VIDEO:
1599                 st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
1600                 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1601                 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1602                 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1603                        hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1604                 break;
1605         case eCMD_C011_INIT:
1606                 if (!(crystalhd_load_firmware_config(hw->adp))) {
1607                         BCMLOG_ERR("Invalid Params.\n");
1608                         sts = BC_STS_FW_AUTH_FAILED;
1609                 }
1610                 break;
1611         default:
1612                 break;
1613         }
1614         return sts;
1615 }
1616
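/*
 * crystalhd_put_ddr2sleep - quiesce the decoder for suspend: pulse the
 * 7412 reset, precharge/idle the SDRAM and drop its CKE pin, reset the
 * audio block, then power down the decoder, audio, video and core PLLs.
 */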
1617 static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1618 {
1619         uint32_t reg;
1620         union link_misc_perst_decoder_ctrl rst_cntrl_reg;
1621
1622         /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1623         rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp,
1624                                          MISC_PERST_DECODER_CTRL);
1625
1626         rst_cntrl_reg.bcm_7412_rst = 1;
1627         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
1628                                          rst_cntrl_reg.whole_reg);
1629         msleep_interruptible(50);
1630
1631         rst_cntrl_reg.bcm_7412_rst = 0;
1632         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
1633                                          rst_cntrl_reg.whole_reg);
1634
1635         /* Close all banks, put DDR in idle */
1636         bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1637
1638         /* Set bit 25 (drop CKE pin of DDR) */
1639         reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1640         reg |= 0x02000000;
1641         bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1642
1643         /* Reset the audio block */
1644         bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1645
1646         /* Power down Raptor PLL */
1647         reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1648         reg |= 0x00008000;
1649         bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1650
1651         /* Power down all Audio PLL */
1652         bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1653
1654         /* Power down video clock (75MHz) */
1655         reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1656         reg |= 0x00008000;
1657         bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1658
1659         /* Power down video clock (75MHz) */
1660         reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1661         reg |= 0x00008000;
1662         bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1663
1664         /* Power down core clock (200MHz) */
1665         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1666         reg |= 0x00008000;
1667         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1668
1669         /* Power down core clock (200MHz) */
1670         reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1671         reg |= 0x00008000;
1672         bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1673
1674         return BC_STS_SUCCESS;
1675 }
1676
1677 /************************************************
1678 ** Non-static interface functions
1679 *************************************************/
1680
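/*
 * crystalhd_download_fw - push a firmware image into device DRAM over the
 * DCI interface.
 *
 * The image is expected to end with a 36-byte signature. After checking
 * that the OTP is programmed and the DCI reports download-ready, the body
 * of the image is streamed one dword at a time, the byte-swapped signature
 * is written into the DCI signature registers, and DCI status is polled
 * for the signature-match bit before the final DCI command bit is set.
 */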
1681 enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer,
1682                                          uint32_t sz)
1683 {
1684         uint32_t reg_data, cnt, *temp_buff;
1685         uint32_t fw_sig_len = 36;
1686         uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1687
1688
1689         if (!adp || !buffer || !sz) {
1690                 BCMLOG_ERR("Invalid Params.\n");
1691                 return BC_STS_INV_ARG;
1692         }
1693
1694         reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1695         if (!(reg_data & 0x02)) {
1696                 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1697                 return BC_STS_ERROR;
1698         }
1699
1700         reg_data = 0;
1701         crystalhd_reg_wr(adp, DCI_CMD, 0);
1702         reg_data |= BC_BIT(0);
1703         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1704
1705         reg_data = 0;
1706         cnt = 1000;
1707         msleep_interruptible(10);
1708
1709         while (reg_data != BC_BIT(4)) {
1710                 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1711                 reg_data &= BC_BIT(4);
1712                 if (--cnt == 0) {
1713                         BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1714                         return BC_STS_TIMEOUT;
1715                 }
1716         }
1717
1718         msleep_interruptible(10);
1719         /*  Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
1720         crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1721         temp_buff = (uint32_t *)buffer;
1722         for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1723                 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1724                 crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1725                 dram_offset += 4;
1726                 temp_buff++;
1727         }
1728         msleep_interruptible(10);
1729
1730         temp_buff++;
1731
1732         sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1733         for (cnt = 0; cnt < 8; cnt++) {
1734                 uint32_t swapped_data = *temp_buff;
1735                 swapped_data = bswap_32_1(swapped_data);
1736                 crystalhd_reg_wr(adp, sig_reg, swapped_data);
1737                 sig_reg -= 4;
1738                 temp_buff++;
1739         }
1740         msleep_interruptible(10);
1741
1742         reg_data = 0;
1743         reg_data |= BC_BIT(1);
1744         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1745         msleep_interruptible(10);
1746
1747         reg_data = 0;
1748         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1749
1750         if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
1751                 cnt = 1000;
1752                 while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1753                         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1754                         reg_data &= BC_BIT(0);
1755                         if (!(--cnt))
1756                                 break;
1757                         msleep_interruptible(10);
1758                 }
1759                 reg_data = 0;
1760                 reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1761                 reg_data |= BC_BIT(4);
1762                 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1763
1764         } else {
1765                 BCMLOG_ERR("F/w Signature mismatch\n");
1766                 return BC_STS_FW_AUTH_FAILED;
1767         }
1768
1769         BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1770         return BC_STS_SUCCESS;
1771 }
1772
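/*
 * crystalhd_do_fw_cmd - send a command to the decoder firmware and wait
 * for its response.
 *
 * The command is written to the TS_Host2CpuSnd area, its address is posted
 * to the Hst2CpuMbx1 mailbox, and we sleep on fw_cmd_event until the ISR
 * flags completion or the 20000 (presumably ms) wait expires. The response
 * is read back from the address in Cpu2HstMbx1, sanity-checked against
 * C011_RET_SUCCESS and handed to crystalhd_fw_cmd_post_proc().
 */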
1773 enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
1774                                 struct BC_FW_CMD *fw_cmd)
1775 {
1776         uint32_t cnt = 0, cmd_res_addr;
1777         uint32_t *cmd_buff, *res_buff;
1778         wait_queue_head_t fw_cmd_event;
1779         int rc = 0;
1780         enum BC_STATUS sts;
1781
1782         crystalhd_create_event(&fw_cmd_event);
1783
1784         if (!hw || !fw_cmd) {
1785                 BCMLOG_ERR("Invalid Arguments\n");
1786                 return BC_STS_INV_ARG;
1787         }
1788
1789         cmd_buff = fw_cmd->cmd;
1790         res_buff = fw_cmd->rsp;
1791
1792         if (!cmd_buff || !res_buff) {
1793                 BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1794                 return BC_STS_INV_ARG;
1795         }
1796
1797         hw->pwr_lock++;
1798
1799         hw->fwcmd_evt_sts = 0;
1800         hw->pfw_cmd_event = &fw_cmd_event;
1801
1802         /*Write the command to the memory*/
1803         crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1804
1805         /*Memory Read for memory arbitrator flush*/
1806         crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1807
1808         /* Write the command address to mailbox */
1809         bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1810         msleep_interruptible(50);
1811
1812         crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
1813
1814         if (!rc) {
1815                 sts = BC_STS_SUCCESS;
1816         } else if (rc == -EBUSY) {
1817                 BCMLOG_ERR("Firmware command T/O\n");
1818                 sts = BC_STS_TIMEOUT;
1819         } else if (rc == -EINTR) {
1820                 BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1821                 sts = BC_STS_IO_USER_ABORT;
1822         } else {
1823                 BCMLOG_ERR("FwCmd IO Error.\n");
1824                 sts = BC_STS_IO_ERROR;
1825         }
1826
1827         if (sts != BC_STS_SUCCESS) {
1828                 BCMLOG_ERR("FwCmd Failed.\n");
1829                 hw->pwr_lock--;
1830                 return sts;
1831         }
1832
1833         /*Get the Response Address*/
1834         cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1835
1836         /*Read the Response*/
1837         crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
1838
1839         hw->pwr_lock--;
1840
1841         if (res_buff[2] != C011_RET_SUCCESS) {
1842                 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1843                 return BC_STS_FW_CMD_ERR;
1844         }
1845
1846         sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1847         if (sts != BC_STS_SUCCESS)
1848                 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
1849
1850         return sts;
1851 }
1852
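/*
 * crystalhd_hw_interrupt - top-level interrupt handler.
 *
 * Samples both the decoder mailbox interrupt status (Stream2Host_Intr_Sts)
 * and the link interrupt status (INTR_INTR_STATUS). Decoder interrupts
 * signal firmware-command completion and picture-info availability; link
 * interrupts are dispatched to the RX and TX ISRs. Returns true when the
 * interrupt belonged to this device, after clearing and EOI-ing it.
 */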
1853 bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1854 {
1855         uint32_t intr_sts = 0;
1856         uint32_t deco_intr = 0;
1857         bool rc = false;
1858
1859         if (!adp || !hw->dev_started)
1860                 return rc;
1861
1862         hw->stats.num_interrupts++;
1863         hw->pwr_lock++;
1864
1865         deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1866         intr_sts  = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1867
1868         if (intr_sts) {
1869                 /* let system know we processed interrupt..*/
1870                 rc = true;
1871                 hw->stats.dev_interrupts++;
1872         }
1873
1874         if (deco_intr && (deco_intr != 0xdeaddead)) {
1875
1876                 if (deco_intr & 0x80000000) {
1877                         /*Set the Event and the status flag*/
1878                         if (hw->pfw_cmd_event) {
1879                                 hw->fwcmd_evt_sts = 1;
1880                                 crystalhd_set_event(hw->pfw_cmd_event);
1881                         }
1882                 }
1883
1884                 if (deco_intr & BC_BIT(1))
1885                         crystalhd_hw_proc_pib(hw);
1886
1887                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1888                 /* FIXME: jarod: No udelay? might this be
1889                  the real reason mini pci-e cards were stalling out? */
1890                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1891                 rc = true;
1892         }
1893
1894         /* Rx interrupts */
1895         crystalhd_rx_isr(hw, intr_sts);
1896
1897         /* Tx interrupts*/
1898         crystalhd_tx_isr(hw, intr_sts);
1899
1900         /* Clear interrupts */
1901         if (rc) {
1902                 if (intr_sts)
1903                         crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1904
1905                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
1906         }
1907
1908         hw->pwr_lock--;
1909
1910         return rc;
1911 }
1912
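/*
 * crystalhd_hw_open - first-time bring-up of the hw context: zero the
 * state, seed the TX/RX packet tags, start the device and program the
 * initial core clock. A context that is already started returns success
 * immediately.
 */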
1913 enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw,
1914                          struct crystalhd_adp *adp)
1915 {
1916         if (!hw || !adp) {
1917                 BCMLOG_ERR("Invalid Arguments\n");
1918                 return BC_STS_INV_ARG;
1919         }
1920
1921         if (hw->dev_started)
1922                 return BC_STS_SUCCESS;
1923
1924         memset(hw, 0, sizeof(struct crystalhd_hw));
1925
1926         hw->adp = adp;
1927         spin_lock_init(&hw->lock);
1928         spin_lock_init(&hw->rx_lock);
1929         /* FIXME: jarod: what are these magic numbers?!? */
1930         hw->tx_ioq_tag_seed = 0x70023070;
1931         hw->rx_pkt_tag_seed = 0x70029070;
1932
1933         hw->stop_pending = 0;
1934         crystalhd_start_device(hw->adp);
1935         hw->dev_started = true;
1936
1937         /* set initial core clock  */
1938         hw->core_clock_mhz = CLOCK_PRESET;
1939         hw->prev_n = 0;
1940         hw->pwr_lock = 0;
1941         crystalhd_hw_set_core_clock(hw);
1942
1943         return BC_STS_SUCCESS;
1944 }
1945
1946 enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1947 {
1948         if (!hw) {
1949                 BCMLOG_ERR("Invalid Arguments\n");
1950                 return BC_STS_INV_ARG;
1951         }
1952
1953         if (!hw->dev_started)
1954                 return BC_STS_SUCCESS;
1955
1956         /* Stop and DDR sleep will happen in here */
1957         crystalhd_hw_suspend(hw);
1958         hw->dev_started = false;
1959
1960         return BC_STS_SUCCESS;
1961 }
1962
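/*
 * crystalhd_hw_setup_dma_rings - allocate descriptor memory for the TX and
 * RX DMA lists.
 *
 * Every TX list element gets a descriptor block sized for BC_LINK_MAX_SGLS
 * descriptors and is parked on tx_freeq; every RX packet gets its own
 * descriptor block and tag and is returned to the free pool via
 * crystalhd_hw_free_rx_pkt(). Any allocation failure unwinds through
 * crystalhd_hw_free_dma_rings().
 */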
1963 enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1964 {
1965         unsigned int i;
1966         void *mem;
1967         size_t mem_len;
1968         dma_addr_t phy_addr;
1969         enum BC_STATUS sts = BC_STS_SUCCESS;
1970         struct crystalhd_rx_dma_pkt *rpkt;
1971
1972         if (!hw || !hw->adp) {
1973                 BCMLOG_ERR("Invalid Arguments\n");
1974                 return BC_STS_INV_ARG;
1975         }
1976
1977         sts = crystalhd_hw_create_ioqs(hw);
1978         if (sts != BC_STS_SUCCESS) {
1979                 BCMLOG_ERR("Failed to create IOQs..\n");
1980                 return sts;
1981         }
1982
1983         mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
1984
1985         for (i = 0; i < BC_TX_LIST_CNT; i++) {
1986                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1987                 if (mem) {
1988                         memset(mem, 0, mem_len);
1989                 } else {
1990                         BCMLOG_ERR("Insufficient Memory For TX\n");
1991                         crystalhd_hw_free_dma_rings(hw);
1992                         return BC_STS_INSUFF_RES;
1993                 }
1994                 /* tx_pkt_pool -- statically allocated descriptor memory */
1995                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1996                 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1997                 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1998                                                  sizeof(struct dma_descriptor);
1999                 hw->tx_pkt_pool[i].list_tag = 0;
2000
2001                 /* Add TX dma requests to Free Queue..*/
2002                 sts = crystalhd_dioq_add(hw->tx_freeq,
2003                                        &hw->tx_pkt_pool[i], false, 0);
2004                 if (sts != BC_STS_SUCCESS) {
2005                         crystalhd_hw_free_dma_rings(hw);
2006                         return sts;
2007                 }
2008         }
2009
2010         for (i = 0; i < BC_RX_LIST_CNT; i++) {
2011                 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
2012                 if (!rpkt) {
2013                         BCMLOG_ERR("Insufficient Memory For RX\n");
2014                         crystalhd_hw_free_dma_rings(hw);
2015                         return BC_STS_INSUFF_RES;
2016                 }
2017
2018                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
2019                 if (mem) {
2020                         memset(mem, 0, mem_len);
2021                 } else {
2022                         BCMLOG_ERR("Insufficient Memory For RX\n");
2023                         crystalhd_hw_free_dma_rings(hw);
2024                         kfree(rpkt);
2025                         return BC_STS_INSUFF_RES;
2026                 }
2027                 rpkt->desc_mem.pdma_desc_start = mem;
2028                 rpkt->desc_mem.phy_addr = phy_addr;
2029                 rpkt->desc_mem.sz  = BC_LINK_MAX_SGLS *
2030                                          sizeof(struct dma_descriptor);
2031                 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
2032                 crystalhd_hw_free_rx_pkt(hw, rpkt);
2033         }
2034
2035         return BC_STS_SUCCESS;
2036 }
2037
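/*
 * crystalhd_hw_free_dma_rings - delete the IOQs and release every TX and
 * RX descriptor block allocated by crystalhd_hw_setup_dma_rings().
 */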
2038 enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
2039 {
2040         unsigned int i;
2041         struct crystalhd_rx_dma_pkt *rpkt = NULL;
2042
2043         if (!hw || !hw->adp) {
2044                 BCMLOG_ERR("Invalid Arguments\n");
2045                 return BC_STS_INV_ARG;
2046         }
2047
2048         /* Delete all IOQs.. */
2049         crystalhd_hw_delete_ioqs(hw);
2050
2051         for (i = 0; i < BC_TX_LIST_CNT; i++) {
2052                 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
2053                         bc_kern_dma_free(hw->adp,
2054                                 hw->tx_pkt_pool[i].desc_mem.sz,
2055                                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2056                                 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2057
2058                         hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2059                 }
2060         }
2061
2062         BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2063         do {
2064                 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2065                 if (!rpkt)
2066                         break;
2067                 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2068                                  rpkt->desc_mem.pdma_desc_start,
2069                                  rpkt->desc_mem.phy_addr);
2070                 kfree(rpkt);
2071         } while (rpkt);
2072
2073         return BC_STS_SUCCESS;
2074 }
2075
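/*
 * crystalhd_hw_post_tx - queue one user buffer for transmit DMA.
 *
 * Returns BC_STS_BUSY while the coded-data input path is full. Otherwise
 * the request's scatter list is translated into a descriptor chain, the
 * packet is tagged and moved to the TX active queue, and the DMA engine is
 * kicked by writing the first descriptor address (low word with the valid
 * bit set) for whichever of the two list slots is being used.
 */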
2076 enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
2077                              struct crystalhd_dio_req *ioreq,
2078                              hw_comp_callback call_back,
2079                              wait_queue_head_t *cb_event, uint32_t *list_id,
2080                              uint8_t data_flags)
2081 {
2082         struct tx_dma_pkt *tx_dma_packet = NULL;
2083         uint32_t first_desc_u_addr, first_desc_l_addr;
2084         uint32_t low_addr, high_addr;
2085         union addr_64 desc_addr;
2086         enum BC_STATUS sts, add_sts;
2087         uint32_t dummy_index = 0;
2088         unsigned long flags;
2089         bool rc;
2090
2091         if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2092                 BCMLOG_ERR("Invalid Arguments\n");
2093                 return BC_STS_INV_ARG;
2094         }
2095
2096         /*
2097          * Since we hit the code-in busy condition very frequently,
2098          * check the code-in (CIN) FIFO status first, before checking
2099          * for a free queue element.
2100          *
2101          * This avoids the queue fetch/add in the normal case.
2102          */
2103         rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2104                                   false, data_flags);
2105         if (rc) {
2106                 hw->stats.cin_busy++;
2107                 return BC_STS_BUSY;
2108         }
2109
2110         /* Get a list from TxFreeQ */
2111         tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(
2112                                                 hw->tx_freeq);
2113         if (!tx_dma_packet) {
2114                 BCMLOG_ERR("No empty elements..\n");
2115                 return BC_STS_ERR_USAGE;
2116         }
2117
2118         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2119                                            &tx_dma_packet->desc_mem,
2120                                            &dummy_index);
2121         if (sts != BC_STS_SUCCESS) {
2122                 add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2123                                            false, 0);
2124                 if (add_sts != BC_STS_SUCCESS)
2125                         BCMLOG_ERR("double fault..\n");
2126
2127                 return sts;
2128         }
2129
2130         hw->pwr_lock++;
2131
2132         desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2133         low_addr = desc_addr.low_part;
2134         high_addr = desc_addr.high_part;
2135
2136         tx_dma_packet->call_back = call_back;
2137         tx_dma_packet->cb_event  = cb_event;
2138         tx_dma_packet->dio_req   = ioreq;
2139
2140         spin_lock_irqsave(&hw->lock, flags);
2141
2142         if (hw->tx_list_post_index == 0) {
2143                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2144                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
2145         } else {
2146                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2147                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2148         }
2149
2150         *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2151                                              hw->tx_list_post_index;
2152
2153         hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2154
2155         spin_unlock_irqrestore(&hw->lock, flags);
2156
2157
2158         /* Insert in Active Q..*/
2159         crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2160                          tx_dma_packet->list_tag);
2161
2162         /*
2163          * The interrupt fires as soon as the valid bit is written,
2164          * so all initialization must be complete before that point.
2165          */
2167         crystalhd_start_tx_dma_engine(hw);
2168         crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2169
2170         crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part |
2171                                          0x01);
2172                                         /* Be sure we set the valid bit ^^^^ */
2173
2174         return BC_STS_SUCCESS;
2175 }
2176
2177 /*
2178  * This is a force cancel and we are racing with ISR.
2179  *
2180  * Will try to remove the req from ActQ before ISR gets it.
2181  * If ISR gets it first then the completion happens in the
2182  * normal path and we will return _STS_NO_DATA from here.
2183  *
2184  * FIX_ME: Not Tested the actual condition..
2185  */
2186 enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw,
2187                                          uint32_t list_id)
2188 {
2189         if (!hw || !list_id) {
2190                 BCMLOG_ERR("Invalid Arguments\n");
2191                 return BC_STS_INV_ARG;
2192         }
2193
2194         crystalhd_stop_tx_dma_engine(hw);
2195         crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2196
2197         return BC_STS_SUCCESS;
2198 }
2199
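/*
 * crystalhd_hw_add_cap_buffer - hand a user capture buffer to the RX path.
 *
 * The request is wrapped in an rx packet and its descriptor chain built;
 * if a UV descriptor index is returned, the physical address where the UV
 * chain starts is recorded for posting. The packet is then either posted
 * to the hardware right away (en_post) or parked on rx_freeq until capture
 * is started.
 */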
2200 enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2201                                  struct crystalhd_dio_req *ioreq, bool en_post)
2202 {
2203         struct crystalhd_rx_dma_pkt *rpkt;
2204         uint32_t tag, uv_desc_ix = 0;
2205         enum BC_STATUS sts;
2206
2207         if (!hw || !ioreq) {
2208                 BCMLOG_ERR("Invalid Arguments\n");
2209                 return BC_STS_INV_ARG;
2210         }
2211
2212         rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2213         if (!rpkt) {
2214                 BCMLOG_ERR("Insufficient resources\n");
2215                 return BC_STS_INSUFF_RES;
2216         }
2217
2218         rpkt->dio_req = ioreq;
2219         tag = rpkt->pkt_tag;
2220
2221         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem,
2222                                          &uv_desc_ix);
2223         if (sts != BC_STS_SUCCESS)
2224                 return sts;
2225
2226         rpkt->uv_phy_addr = 0;
2227
2228         /* Store the address of UV in the rx packet for post*/
2229         if (uv_desc_ix)
2230                 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2231                          (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
2232
2233         if (en_post)
2234                 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2235         else
2236                 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
2237
2238         return sts;
2239 }
2240
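/*
 * crystalhd_hw_get_cap_buffer - wait on the ready queue for a completed
 * capture. On success the caller receives the DIO request and, when
 * COMP_FLAG_PIB_VALID is set, a copy of the picture information block.
 * A pending signal aborts the wait with BC_STS_IO_USER_ABORT; otherwise
 * an empty queue times out with BC_STS_TIMEOUT.
 */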
2241 enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2242                                     struct BC_PIC_INFO_BLOCK *pib,
2243                                     struct crystalhd_dio_req **ioreq)
2244 {
2245         struct crystalhd_rx_dma_pkt *rpkt;
2246         uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2247         uint32_t sig_pending = 0;
2248
2249
2250         if (!hw || !ioreq || !pib) {
2251                 BCMLOG_ERR("Invalid Arguments\n");
2252                 return BC_STS_INV_ARG;
2253         }
2254
2255         rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2256         if (!rpkt) {
2257                 if (sig_pending) {
2258                         BCMLOG(BCMLOG_INFO, "wait on frame aborted, sig_pending %d\n",
2259                                          sig_pending);
2260                         return BC_STS_IO_USER_ABORT;
2261                 } else {
2262                         return BC_STS_TIMEOUT;
2263                 }
2264         }
2265
2266         rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2267
2268         if (rpkt->flags & COMP_FLAG_PIB_VALID)
2269                 memcpy(pib, &rpkt->pib, sizeof(*pib));
2270
2271         *ioreq = rpkt->dio_req;
2272
2273         crystalhd_hw_free_rx_pkt(hw, rpkt);
2274
2275         return BC_STS_SUCCESS;
2276 }
2277
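/*
 * crystalhd_hw_start_capture - prime both RX DMA lists with buffers taken
 * from the free queue.
 */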
2278 enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2279 {
2280         struct crystalhd_rx_dma_pkt *rx_pkt;
2281         enum BC_STATUS sts;
2282         uint32_t i;
2283
2284         if (!hw) {
2285                 BCMLOG_ERR("Invalid Arguments\n");
2286                 return BC_STS_INV_ARG;
2287         }
2288
2289         /* This is the start of capture; post to both lists. */
2290         for (i = 0; i < DMA_ENGINE_CNT; i++) {
2291                 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2292                 if (!rx_pkt)
2293                         return BC_STS_NO_DATA;
2294                 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2295                 if (BC_STS_SUCCESS != sts)
2296                         break;
2297
2298         }
2299
2300         return BC_STS_SUCCESS;
2301 }
2302
2303 enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2304 {
2305         void *temp = NULL;
2306
2307         if (!hw) {
2308                 BCMLOG_ERR("Invalid Arguments\n");
2309                 return BC_STS_INV_ARG;
2310         }
2311
2312         crystalhd_stop_rx_dma_engine(hw);
2313
2314         do {
2315                 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2316                 if (temp)
2317                         crystalhd_rx_pkt_rel_call_back(hw, temp);
2318         } while (temp);
2319
2320         return BC_STS_SUCCESS;
2321 }
2322
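/*
 * crystalhd_hw_pause - mark a stop as pending; the pause is finalized here
 * only if both RX lists are already free, otherwise the RX ISR finalizes
 * it once the in-flight lists complete. crystalhd_hw_unpause() clears the
 * pending stop, disables ASPM L1 on the link and restarts capture.
 */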
2323 enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2324 {
2325         hw->stats.pause_cnt++;
2326         hw->stop_pending = 1;
2327
2328         if ((hw->rx_list_sts[0] == sts_free) &&
2329             (hw->rx_list_sts[1] == sts_free))
2330                 crystalhd_hw_finalize_pause(hw);
2331
2332         return BC_STS_SUCCESS;
2333 }
2334
2335 enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2336 {
2337         enum BC_STATUS sts;
2338         uint32_t aspm;
2339
2340         hw->stop_pending = 0;
2341
2342         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2343         aspm &= ~ASPM_L1_ENABLE;
2344 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2345         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2346
2347         sts = crystalhd_hw_start_capture(hw);
2348         return sts;
2349 }
2350
2351 enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2352 {
2353         enum BC_STATUS sts;
2354
2355         if (!hw) {
2356                 BCMLOG_ERR("Invalid Arguments\n");
2357                 return BC_STS_INV_ARG;
2358         }
2359
2360         sts = crystalhd_put_ddr2sleep(hw);
2361         if (sts != BC_STS_SUCCESS) {
2362                 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2363                 return BC_STS_ERROR;
2364         }
2365
2366         if (!crystalhd_stop_device(hw->adp)) {
2367                 BCMLOG_ERR("Failed to Stop Device!!\n");
2368                 return BC_STS_ERROR;
2369         }
2370
2371         return BC_STS_SUCCESS;
2372 }
2373
2374 void crystalhd_hw_stats(struct crystalhd_hw *hw,
2375                  struct crystalhd_hw_stats *stats)
2376 {
2377         if (!hw) {
2378                 BCMLOG_ERR("Invalid Arguments\n");
2379                 return;
2380         }
2381
2382         /* If called with NULL stats, it's a request to zero out the stats. */
2383         if (!stats) {
2384                 memset(&hw->stats, 0, sizeof(hw->stats));
2385                 return;
2386         }
2387
2388         hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2389         hw->stats.rdyq_count  = crystalhd_dioq_count(hw->rx_rdyq);
2390         memcpy(stats, &hw->stats, sizeof(*stats));
2391 }
2392
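/*
 * crystalhd_hw_set_core_clock - reprogram PLL-A for hw->core_clock_mhz.
 *
 * The divider is n = core_clock_mhz / 5, and n * 27 (presumably the VCO
 * frequency in MHz off a 27 MHz reference) selects the VCO range bits.
 * The SDRAM refresh interval is rescaled for the new clock before the PLL
 * is written, and the change is confirmed by polling bit 17 of PllACtl
 * (taken here to be the lock indication) for up to ~100 ms.
 */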
2393 enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2394 {
2395         uint32_t reg, n, i;
2396         uint32_t vco_mg, refresh_reg;
2397
2398         if (!hw) {
2399                 BCMLOG_ERR("Invalid Arguments\n");
2400                 return BC_STS_INV_ARG;
2401         }
2402
2403         /* FIXME: jarod: wha? */
2404         /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2405         n = hw->core_clock_mhz/5;
2406
2407         if (n == hw->prev_n)
2408                 return BC_STS_CLK_NOCHG;
2409
2410         if (hw->pwr_lock > 0) {
2411                 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2412                 return BC_STS_CLK_NOCHG;
2413         }
2414
2415         i = n * 27;
2416         if (i < 560)
2417                 vco_mg = 0;
2418         else if (i < 900)
2419                 vco_mg = 1;
2420         else if (i < 1030)
2421                 vco_mg = 2;
2422         else
2423                 vco_mg = 3;
2424
2425         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2426
2427         reg &= 0xFFFFCFC0;
2428         reg |= n;
2429         reg |= vco_mg << 12;
2430
2431         BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2432                hw->core_clock_mhz, n, vco_mg);
2433
2434         /* Change the DRAM refresh rate to accommodate the new frequency */
2435         /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2436         refresh_reg = (7 * hw->core_clock_mhz / 16);
2437         bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2438
2439         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
2440
2441         i = 0;
2442
2443         for (i = 0; i < 10; i++) {
2444                 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2445
2446                 if (reg & 0x00020000) {
2447                         hw->prev_n = n;
2448                         /* FIXME: jarod: outputting
2449                          a random "C" is... confusing... */
2450                         BCMLOG(BCMLOG_INFO, "C");
2451                         return BC_STS_SUCCESS;
2452                 } else {
2453                         msleep_interruptible(10);
2454                 }
2455         }
2456         BCMLOG(BCMLOG_INFO, "clk change failed\n");
2457         return BC_STS_CLK_NOCHG;
2458 }