2 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
4 * Copyright (C) 2009 Secret Lab Technologies Ltd.
6 * This file is released under the GPLv2
9 * - Add support for multiple requests to be queued.
12 #include <linux/interrupt.h>
13 #include <linux/kernel.h>
15 #include <linux/of_platform.h>
16 #include <linux/spinlock.h>
17 #include <linux/module.h>
20 #include <asm/mpc52xx.h>
23 #include <linux/fsl/bestcomm/bestcomm.h>
24 #include <linux/fsl/bestcomm/bestcomm_priv.h>
25 #include <linux/fsl/bestcomm/gen_bd.h>
27 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
28 MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
29 MODULE_LICENSE("GPL");
31 #define LPBFIFO_REG_PACKET_SIZE (0x00)
32 #define LPBFIFO_REG_START_ADDRESS (0x04)
33 #define LPBFIFO_REG_CONTROL (0x08)
34 #define LPBFIFO_REG_ENABLE (0x0C)
35 #define LPBFIFO_REG_BYTES_DONE_STATUS (0x14)
36 #define LPBFIFO_REG_FIFO_DATA (0x40)
37 #define LPBFIFO_REG_FIFO_STATUS (0x44)
38 #define LPBFIFO_REG_FIFO_CONTROL (0x48)
39 #define LPBFIFO_REG_FIFO_ALARM (0x4C)
/*
 * Driver-private state for the single MPC5200 LocalPlus Bus FIFO.
 *
 * NOTE(review): this excerpt omits several members that the code below
 * references (e.g. regs, irq, lock, dma_irqs_enabled) — confirm against
 * the full source before relying on this layout.
 */
41 struct mpc52xx_lpbfifo {
43 phys_addr_t regs_phys;
/* Bestcomm DMA tasks: one per direction, plus the one currently active. */
48 struct bcom_task *bcom_tx_task;
49 struct bcom_task *bcom_rx_task;
50 struct bcom_task *bcom_cur_task;
52 /* Current state data */
/* Request currently being processed; presumably NULL when idle — confirm. */
53 struct mpc52xx_lpbfifo_request *req;
57 /* The MPC5200 has only one fifo, so only need one instance structure */
58 static struct mpc52xx_lpbfifo lpbfifo;
61 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
/*
 * Programs the FIFO hardware (and, in DMA mode, the Bestcomm task) for the
 * next chunk of @req, starting at req->pos.  Caller is expected to hold the
 * driver lock — TODO confirm; the locking lines are not visible in this
 * excerpt.  NOTE(review): several interior lines (braces, else branches,
 * local declarations for reg/data/i/bit_fields/bd) are missing here; code
 * tokens below are byte-identical to what is visible.
 */
63 static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
/* Bytes still to transfer for this request. */
65 size_t transfer_size = req->size - req->pos;
/* Decode request flags once: DMA vs PIO, direction, and polled-DMA mode. */
71 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
72 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
73 int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
75 /* Set and clear the reset bits; is good practice in User Manual */
76 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
78 /* set master enable bit */
79 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
81 /* While the FIFO can be setup for transfer sizes as large as
82 * 16M-1, the FIFO itself is only 512 bytes deep and it does
83 * not generate interrupts for FIFO full events (only transfer
84 * complete will raise an IRQ). Therefore when not using
85 * Bestcomm to drive the FIFO it needs to either be polled, or
86 * transfers need to be constrained to the size of the fifo.
88 * This driver restricts the size of the transfer
/* PIO transfers are capped at the 512-byte FIFO depth. */
90 if (transfer_size > 512)
93 /* Load the FIFO with data */
/* PIO write path: push the payload into the FIFO a word at a time. */
95 reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
96 data = req->data + req->pos;
97 for (i = 0; i < transfer_size; i += 4)
98 out_be32(reg, *data++);
101 /* Unmask both error and completion irqs */
102 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
104 /* Choose the correct direction
106 * Configure the watermarks so DMA will always complete correctly.
107 * It may be worth experimenting with the ALARM value to see if
108 * there is a performance impact. However, if it is wrong there
109 * is a risk of DMA not transferring the last chunk of data
/* DMA write direction: TX watermarks and the memory->fifo Bestcomm task. */
112 out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
113 out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
114 lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
/* DMA read direction: RX watermarks and the fifo->memory Bestcomm task. */
116 out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
117 out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
118 lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;
/* Polled DMA must not take the Bestcomm RX irq; interrupt DMA needs it. */
121 if (lpbfifo.dma_irqs_enabled) {
122 disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
123 lpbfifo.dma_irqs_enabled = 0;
126 if (!lpbfifo.dma_irqs_enabled) {
127 enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
128 lpbfifo.dma_irqs_enabled = 1;
/* Queue one Bestcomm buffer descriptor covering this chunk. */
133 bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
134 bd->status = transfer_size;
137 * In the DMA read case, the DMA doesn't complete,
138 * possibly due to incorrect watermarks in the ALARM
139 * and CONTROL regs. For now instead of trying to
140 * determine the right watermarks that will make this
141 * work, just increase the number of bytes the FIFO is
144 * When submitting another operation, the FIFO will get
145 * reset, so the condition of the FIFO waiting for a
146 * non-existent 4 bytes will get cleared.
148 transfer_size += 4; /* BLECH! */
150 bd->data[0] = req->data_phys + req->pos;
151 bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);
153 /* error irq & master enabled bit */
154 bit_fields = 0x00000201;
/* Interrupt-driven DMA writes also want the FIFO completion irq. */
157 if (write && (!poll_dma))
158 bit_fields |= 0x00000100; /* completion irq too */
159 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
162 /* Set transfer size, width, chip select and READ mode */
163 out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
164 req->offset + req->pos);
165 out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);
/* Chip select in bits 24+; 0x8 selects the transfer width — TODO confirm. */
167 bit_fields = req->cs << 24 | 0x000008;
169 bit_fields |= 0x010000; /* read mode */
170 out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
/* Kick it off now unless the caller asked to defer the start. */
173 if (!lpbfifo.req->defer_xfer_start)
174 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
176 bcom_enable(lpbfifo.bcom_cur_task);
180 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
182 * On transmit, the dma completion irq triggers before the fifo completion
183 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
184 * task completion irq because everything is not really done until the LPB FIFO
185 * completion irq triggers.
188 * For DMA, on receive, the "Fat Lady" is the bestcom completion irq. On
189 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
190 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
192 * Reasons for entering this routine:
193 * 1) PIO mode rx and tx completion irq
194 * 2) DMA interrupt mode tx completion irq
195 * 3) DMA polled mode tx
198 * 1) Transfer aborted
199 * 2) FIFO complete without DMA; more data to do
200 * 3) FIFO complete without DMA; all data transferred
201 * 4) FIFO complete using DMA
203 * Condition 1 can occur regardless of whether or not DMA is used.
204 * It requires executing the callback to report the error and exiting
207 * Condition 2 requires programming the FIFO with the next block of data
209 * Condition 3 requires executing the callback to report completion
211 * Condition 4 means the same as 3, except that we also retrieve the bcom
212 * buffer so DMA doesn't get clogged up.
214 * To make things trickier, the spinlock must be dropped before
215 * executing the callback, otherwise we could end up with a deadlock
216 * or nested spinlock condition. The out path is non-trivial, so
217 * extra fiddling is done to make sure all paths lead to the same
/*
 * NOTE(review): several interior lines (locals such as count/reg/data/i/ts/
 * do_callback, the req == NULL guard, the abort branch, position update and
 * callback invocation) are missing from this excerpt.
 */
220 static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
222 struct mpc52xx_lpbfifo_request *req;
/* Status bits: completion and abort flags — exact bit meanings per the
 * MPC5200B manual; TODO confirm. */
223 u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
230 int dma, write, poll_dma;
232 spin_lock_irqsave(&lpbfifo.lock, flags);
/* Spurious interrupt: no request in flight. */
237 spin_unlock_irqrestore(&lpbfifo.lock, flags);
238 pr_err("bogus LPBFIFO IRQ\n");
242 dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
243 write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
244 poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
/* DMA reads are completed by the Bestcomm irq, not this handler. */
247 spin_unlock_irqrestore(&lpbfifo.lock, flags);
248 pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
252 if ((status & 0x01) == 0) {
256 /* check abort bit */
/* Abort path: reset the FIFO before reporting the error. */
258 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
263 /* Read result from hardware */
264 count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
267 if (!dma && !write) {
268 /* copy the data out of the FIFO */
269 reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
270 data = req->data + req->pos;
271 for (i = 0; i < count; i += 4)
272 *data++ = in_be32(reg);
275 /* Update transfer position and count */
278 /* Decide what to do next */
279 if (req->size - req->pos)
280 mpc52xx_lpbfifo_kick(req); /* more work to do */
/* Acknowledge/clear the completion status bit. */
286 out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);
288 if (dma && (status & 0x11)) {
290 * Count the DMA as complete only when the FIFO completion
291 * status or abort bits are set.
293 * (status & 0x01) should always be the case except sometimes
294 * when using polled DMA.
296 * (status & 0x10) {transfer aborted}: This case needs more
299 bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
/* Record the final byte for debugging/verification by callers. */
301 req->last_byte = ((u8 *)req->data)[req->size - 1];
303 /* When the do_callback flag is set; it means the transfer is finished
304 * so set the FIFO as idle */
308 if (irq != 0) /* don't increment on polled case */
/* Accumulate time spent in this handler (timebase ticks). */
311 req->irq_ticks += get_tbl() - ts;
312 spin_unlock_irqrestore(&lpbfifo.lock, flags);
314 /* Spinlock is released; it is now safe to call the callback */
315 if (do_callback && req->callback)
322 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
324 * Only used when receiving data.
/*
 * Completes a DMA read once Bestcomm has drained the FIFO into memory.
 * NOTE(review): locals (flags, status, ts) and the trailing callback/return
 * lines are missing from this excerpt.
 */
326 static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
328 struct mpc52xx_lpbfifo_request *req;
333 spin_lock_irqsave(&lpbfifo.lock, flags);
/* Ignore if no request in flight or the request is PIO-only. */
337 if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
338 spin_unlock_irqrestore(&lpbfifo.lock, flags);
342 if (irq != 0) /* don't increment on polled case */
/* Buffer not yet done: bail out and let the poller retry; warn if stalled. */
345 if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
346 spin_unlock_irqrestore(&lpbfifo.lock, flags);
348 req->buffer_not_done_cnt++;
349 if ((req->buffer_not_done_cnt % 1000) == 0)
350 pr_err("transfer stalled\n");
355 bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
/* Record the final byte for debugging/verification by callers. */
357 req->last_byte = ((u8 *)req->data)[req->size - 1];
/* Low 24 bits of the bd status hold the completed byte count. */
359 req->pos = status & 0x00ffffff;
361 /* Mark the FIFO as idle */
364 /* Release the lock before calling out to the callback. */
365 req->irq_ticks += get_tbl() - ts;
366 spin_unlock_irqrestore(&lpbfifo.lock, flags);
375 * mpc52xx_lpbfifo_poll - Poll for DMA completion
/*
 * Invokes the appropriate completion handler with irq==0 (polled mode):
 * the FIFO handler for writes/PIO, the Bestcomm handler for DMA reads.
 */
377 void mpc52xx_lpbfifo_poll(void)
379 struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
380 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
381 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
384 * For more information, see comments on the "Fat Lady"
387 mpc52xx_lpbfifo_irq(0, NULL);
389 mpc52xx_lpbfifo_bcom_irq(0, NULL);
391 EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
394 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
395 * @req: Pointer to request structure
/*
 * Returns 0 on success; presumably -EBUSY when a transfer is already in
 * progress — the return lines are missing from this excerpt, TODO confirm.
 * Only one request can be in flight at a time (single-FIFO hardware).
 */
397 int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
404 spin_lock_irqsave(&lpbfifo.lock, flags);
406 /* If the req pointer is already set, then a transfer is in progress */
408 spin_unlock_irqrestore(&lpbfifo.lock, flags);
412 /* Setup the transfer */
416 req->buffer_not_done_cnt = 0;
/* Program the hardware for the first chunk while still holding the lock. */
419 mpc52xx_lpbfifo_kick(req);
420 spin_unlock_irqrestore(&lpbfifo.lock, flags);
423 EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
/*
 * Start a transfer that was previously submitted with defer_xfer_start set.
 * Presumably returns 0 on success and -EBUSY if a different transfer is
 * already running — return lines are missing from this excerpt, TODO confirm.
 */
425 int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
432 spin_lock_irqsave(&lpbfifo.lock, flags);
435 * If the req pointer is already set and a transfer was
436 * started on submit, then this transfer is in progress
438 if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
439 spin_unlock_irqrestore(&lpbfifo.lock, flags);
444 * If the req was previously submitted but not
445 * started, start it now
447 if (lpbfifo.req && lpbfifo.req == req &&
448 lpbfifo.req->defer_xfer_start) {
/* Writing PACKET_SIZE's start bit actually launches the transfer. */
449 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
452 spin_unlock_irqrestore(&lpbfifo.lock, flags);
455 EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
/*
 * Abort @req if it is the transfer currently in flight: reset both Bestcomm
 * tasks and put the FIFO back into reset.  A request that is not current is
 * silently ignored.  No callback is invoked on this path.
 */
457 void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
461 spin_lock_irqsave(&lpbfifo.lock, flags);
462 if (lpbfifo.req == req) {
463 /* Put it into reset and clear the state */
464 bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
465 bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
466 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
469 spin_unlock_irqrestore(&lpbfifo.lock, flags);
471 EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
/*
 * Probe: map registers, hold the FIFO in reset, and wire up the main FIFO
 * irq plus the Bestcomm RX/TX tasks and the RX task irq.
 * NOTE(review): locals (res, rc), several error checks/returns and the goto
 * labels of the unwind path are missing from this excerpt — the visible
 * error-path lines below imply the usual goto-cleanup structure.
 */
473 static int mpc52xx_lpbfifo_probe(struct platform_device *op)
/* Only one FIFO instance is supported; refuse a second bind. */
478 if (lpbfifo.dev != NULL)
481 lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
485 if (of_address_to_resource(op->dev.of_node, 0, &res))
487 lpbfifo.regs_phys = res.start;
488 lpbfifo.regs = of_iomap(op->dev.of_node, 0);
492 spin_lock_init(&lpbfifo.lock);
494 /* Put FIFO into reset */
495 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
497 /* Register the interrupt handler */
498 rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
499 "mpc52xx-lpbfifo", &lpbfifo);
503 /* Request the Bestcomm receive (fifo --> memory) task and IRQ */
504 lpbfifo.bcom_rx_task =
505 bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
506 BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
508 if (!lpbfifo.bcom_rx_task)
511 rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
512 mpc52xx_lpbfifo_bcom_irq, 0,
513 "mpc52xx-lpbfifo-rx", &lpbfifo);
515 goto err_bcom_rx_irq;
/* RX task irq starts enabled; kick() toggles it for polled DMA. */
517 lpbfifo.dma_irqs_enabled = 1;
519 /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
520 lpbfifo.bcom_tx_task =
521 bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
522 BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
523 if (!lpbfifo.bcom_tx_task)
/* Success: record the bound device so remove() can verify it. */
526 lpbfifo.dev = &op->dev;
/* Error unwind: release resources in reverse order of acquisition. */
530 free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
532 bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
535 iounmap(lpbfifo.regs);
538 dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
/*
 * Remove: reset the FIFO and release irqs, Bestcomm tasks and the register
 * mapping in reverse order of probe.  NOTE(review): the return statements
 * and the clearing of lpbfifo.dev are not visible in this excerpt.
 */
543 static int mpc52xx_lpbfifo_remove(struct platform_device *op)
/* Guard against a remove call for a device we never bound. */
545 if (lpbfifo.dev != &op->dev)
548 /* Put FIFO in reset */
549 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
551 /* Release the bestcomm transmit task */
552 free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
553 bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);
555 /* Release the bestcomm receive task */
556 free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
557 bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
559 free_irq(lpbfifo.irq, &lpbfifo);
560 iounmap(lpbfifo.regs);
/* Device-tree match table: binds to the MPC5200 LPB FIFO node. */
567 static const struct of_device_id mpc52xx_lpbfifo_match[] = {
568 { .compatible = "fsl,mpc5200-lpbfifo", },
571 MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match);
/* Platform driver glue; module_platform_driver() generates init/exit. */
573 static struct platform_driver mpc52xx_lpbfifo_driver = {
575 .name = "mpc52xx-lpbfifo",
576 .of_match_table = mpc52xx_lpbfifo_match,
578 .probe = mpc52xx_lpbfifo_probe,
579 .remove = mpc52xx_lpbfifo_remove,
581 module_platform_driver(mpc52xx_lpbfifo_driver);