staging: ti dspbridge: add core driver sources
author     Omar Ramirez Luna <omar.ramirez@ti.com>
           Wed, 23 Jun 2010 13:01:56 +0000 (16:01 +0300)
committer  Greg Kroah-Hartman <gregkh@suse.de>
           Wed, 23 Jun 2010 22:39:06 +0000 (15:39 -0700)
Add TI's DSP Bridge core driver sources

Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Signed-off-by: Kanigeri, Hari <h-kanigeri2@ti.com>
Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@ti.com>
Signed-off-by: Hebbar, Shivananda <x0hebbar@ti.com>
Signed-off-by: Ramos Falcon, Ernesto <ernesto@ti.com>
Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Anna, Suman <s-anna@ti.com>
Signed-off-by: Gupta, Ramesh <grgupta@ti.com>
Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@ti.com>
Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@nokia.com>
Signed-off-by: Armando Uribe De Leon <x0095078@ti.com>
Signed-off-by: Deepak Chitriki <deepak.chitriki@ti.com>
Signed-off-by: Menon, Nishanth <nm@ti.com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
17 files changed:
drivers/staging/tidspbridge/core/_cmm.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/_deh.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/_msg_sm.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/_tiomap.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/_tiomap_pwr.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/chnl_sm.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/dsp-clock.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/io_sm.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/mmu_fault.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/mmu_fault.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/msg_sm.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/tiomap3430.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/tiomap3430_pwr.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/tiomap_io.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/tiomap_io.h [new file with mode: 0644]
drivers/staging/tidspbridge/core/ue_deh.c [new file with mode: 0644]
drivers/staging/tidspbridge/core/wdt.c [new file with mode: 0644]

diff --git a/drivers/staging/tidspbridge/core/_cmm.h b/drivers/staging/tidspbridge/core/_cmm.h
new file mode 100644 (file)
index 0000000..7660bef
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * _cmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining CMM manager objects and defines needed
+ * by the IO manager to register shared memory regions when the DSP
+ * base image is loaded (bridge_io_on_loaded).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CMM_
+#define _CMM_
+
+/*
+ *  These target side symbols define the beginning and ending addresses
+ *  of the section of shared memory used for shared memory manager CMM.
+ *  They are defined in the *cfg.cmd file by cdb code.
+ */
+#define SHM0_SHARED_BASE_SYM             "_SHM0_BEG"
+#define SHM0_SHARED_END_SYM              "_SHM0_END"
+#define SHM0_SHARED_RESERVED_BASE_SYM    "_SHM0_RSVDSTRT"
+
+/*
+ *  Shared Memory Region #0 (SHMSEG0) is used in the following way:
+ *
+ *  |(_SHM0_BEG)                  | (_SHM0_RSVDSTRT)           | (_SHM0_END)
+ *  V                             V                            V
+ *  ------------------------------------------------------------
+ *  |     DSP-side allocations    |    GPP-side allocations    |
+ *  ------------------------------------------------------------
+ *
+ *
+ */
+
+#endif /* _CMM_ */
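
The three SHM0_* symbols above are resolved against the loaded DSP image. A
minimal sketch of how the IO manager could consume them, assuming a
dev_get_symbol()-style lookup; the names and flow here are illustrative,
not this commit's code:

	/* Sketch, not part of this commit: locate the CMM region once the
	 * DSP base image is loaded; dev_get_symbol() is assumed to return
	 * a target-side symbol value by name. */
	u32 shm_base = 0, shm_end = 0, shm_rsvd = 0;

	dev_get_symbol(hdev_obj, SHM0_SHARED_BASE_SYM, &shm_base);
	dev_get_symbol(hdev_obj, SHM0_SHARED_END_SYM, &shm_end);
	dev_get_symbol(hdev_obj, SHM0_SHARED_RESERVED_BASE_SYM, &shm_rsvd);

	/* Per the diagram: [_SHM0_BEG, _SHM0_RSVDSTRT) holds DSP-side
	 * allocations, [_SHM0_RSVDSTRT, _SHM0_END) holds GPP-side ones. */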
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
new file mode 100644 (file)
index 0000000..8da2212
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * _deh.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header for DEH module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _DEH_
+#define _DEH_
+
+#include <dspbridge/ntfy.h>
+#include <dspbridge/dspdefs.h>
+
+/* DEH Manager: only one created per board: */
+struct deh_mgr {
+       struct bridge_dev_context *hbridge_context;     /* Bridge context. */
+       struct ntfy_object *ntfy_obj;   /* NTFY object */
+       struct dsp_errorinfo err_info;  /* DSP exception info. */
+
+       /* MMU Fault DPC */
+       struct tasklet_struct dpc_tasklet;
+};
+
+#endif /* _DEH_ */
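
The dpc_tasklet above is the deferred half of MMU-fault handling: the ISR
only records the fault and schedules the tasklet. A minimal sketch of that
split, assuming <linux/interrupt.h>; the ISR name and body are illustrative
only:

	/* Sketch, not part of this commit. */
	static irqreturn_t mmu_fault_isr(int irq, void *data)
	{
		struct deh_mgr *deh = data;

		/* ... capture fault address/status into deh->err_info ... */
		tasklet_schedule(&deh->dpc_tasklet);
		return IRQ_HANDLED;
	}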
diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h
new file mode 100644 (file)
index 0000000..556de5c
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * _msg_sm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Private header file defining msg_ctrl manager objects and defines needed
+ * by IO manager.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MSG_SM_
+#define _MSG_SM_
+
+#include <dspbridge/list.h>
+#include <dspbridge/msgdefs.h>
+
+/*
+ *  These target side symbols define the beginning and ending addresses
+ *  of the section of shared memory used for messages. They are
+ *  defined in the *cfg.cmd file by cdb code.
+ */
+#define MSG_SHARED_BUFFER_BASE_SYM      "_MSG_BEG"
+#define MSG_SHARED_BUFFER_LIMIT_SYM     "_MSG_END"
+
+#ifndef _CHNL_WORDSIZE
+#define _CHNL_WORDSIZE 4       /* default _CHNL_WORDSIZE is 4 bytes/word */
+#endif
+
+/*
+ *  ======== msg_ctrl ========
+ *  There is a control structure for messages to the DSP, and a control
+ *  structure for messages from the DSP. The shared memory region for
+ *  transferring messages is partitioned as follows:
+ *
+ *  ----------------------------------------------------------
+ *  |Control | Messages from DSP | Control | Messages to DSP |
+ *  ----------------------------------------------------------
+ *
+ *  msg_ctrl control structure for messages to the DSP is used in the following
+ *  way:
+ *
+ *  buf_empty -      This flag is set to FALSE by the GPP after it has output
+ *                  messages for the DSP. The DSP host driver sets it to
+ *                  TRUE after it has copied the messages.
+ *  post_swi -       Set to 1 by the GPP after it has written the messages,
+ *                  set the size, and set buf_empty to FALSE.
+ *                  The DSP Host driver uses SWI_andn of the post_swi field
+ *                  when a host interrupt occurs. The host driver clears
+ *                  this after posting the SWI.
+ *  size -          Number of messages to be read by the DSP.
+ *
+ *  For messages from the DSP:
+ *  buf_empty -      This flag is set to FALSE by the DSP after it has output
+ *                  messages for the GPP. The DPC on the GPP sets it to
+ *                  TRUE after it has copied the messages.
+ *  post_swi -       Set to 1 by the DPC on the GPP after copying the messages.
+ *  size -          Number of messages to be read by the GPP.
+ */
+struct msg_ctrl {
+       u32 buf_empty;          /* to/from DSP buffer is empty */
+       u32 post_swi;           /* Set to "1" to post msg_ctrl SWI */
+       u32 size;               /* Number of messages to/from the DSP */
+       u32 resvd;
+};
+
+/*
+ *  ======== msg_mgr ========
+ *  The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can
+ *  have msg_queue to hold all messages that come up from the corresponding
+ *  node on the DSP. The msg_mgr also has a shared queue of messages
+ *  ready to go to the DSP.
+ */
+struct msg_mgr {
+       /* The first field must match that in msgobj.h */
+
+       /* Function interface to Bridge driver */
+       struct bridge_drv_interface *intf_fxns;
+
+       struct io_mgr *hio_mgr; /* IO manager */
+       struct lst_list *queue_list;    /* List of MSG_QUEUEs */
+       spinlock_t msg_mgr_lock;        /* For critical sections */
+       /* Signalled when MsgFrame is available */
+       struct sync_object *sync_event;
+       struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+       struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */
+       u32 msgs_pending;       /* # of queued messages to go to DSP */
+       u32 max_msgs;           /* Max # of msgs that fit in buffer */
+       msg_onexit on_exit;     /* called when RMS_EXIT is received */
+};
+
+/*
+ *  ======== msg_queue ========
+ *  Each NODE has a msg_queue for receiving messages from the
+ *  corresponding node on the DSP. The msg_queue object maintains a list
+ *  of messages that have been sent to the host, but not yet read (MSG_Get),
+ *  and a list of free frames that can be filled when new messages arrive
+ *  from the DSP.
+ *  The msg_queue's sync_event gets posted when a message is ready.
+ */
+struct msg_queue {
+       struct list_head list_elem;
+       struct msg_mgr *hmsg_mgr;
+       u32 max_msgs;           /* Node message depth */
+       u32 msgq_id;            /* Node environment pointer */
+       struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
+       /* Filled MsgFrames waiting to be read */
+       struct lst_list *msg_used_list;
+       void *arg;              /* Handle passed to mgr on_exit callback */
+       struct sync_object *sync_event; /* Signalled when message is ready */
+       struct sync_object *sync_done;  /* For synchronizing cleanup */
+       struct sync_object *sync_done_ack;      /* For synchronizing cleanup */
+       struct ntfy_object *ntfy_obj;   /* For notification of message ready */
+       bool done;              /* TRUE <==> deleting the object */
+       u32 io_msg_pend;        /* Number of pending MSG_get/put calls */
+};
+
+/*
+ *  ======== msg_dspmsg ========
+ */
+struct msg_dspmsg {
+       struct dsp_msg msg;
+       u32 msgq_id;            /* Identifies the node the message goes to */
+};
+
+/*
+ *  ======== msg_frame ========
+ */
+struct msg_frame {
+       struct list_head list_elem;
+       struct msg_dspmsg msg_data;
+};
+
+#endif /* _MSG_SM_ */
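
The buf_empty/post_swi/size handshake described in the msg_ctrl comment
boils down to three ordered stores plus an interrupt. A minimal sketch of
the GPP-to-DSP direction (function name illustrative; 'ctrl' would point at
the "Control | Messages to DSP" slot of the shared region):

	/* Sketch, not part of this commit: messages have already been
	 * copied into the shared buffer before this runs. */
	static void msg_post_to_dsp(struct msg_ctrl *ctrl, u32 num_msgs)
	{
		ctrl->size = num_msgs;  /* how many messages the DSP reads */
		ctrl->buf_empty = 0;    /* FALSE: buffer now owned by DSP  */
		ctrl->post_swi = 1;     /* flag consumed via SWI_andn      */
		/* then interrupt the DSP, e.g. through the mailbox */
	}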
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
new file mode 100644 (file)
index 0000000..bf0164e
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+ * _tiomap.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types private to this Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_
+#define _TIOMAP_
+
+#include <plat/powerdomain.h>
+#include <plat/clockdomain.h>
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+#include <dspbridge/devdefs.h>
+#include <hw_defs.h>
+#include <dspbridge/dspioctl.h>        /* for bridge_ioctl_extproc defn */
+#include <dspbridge/sync.h>
+#include <dspbridge/clk.h>
+
+struct map_l4_peripheral {
+       u32 phys_addr;
+       u32 dsp_virt_addr;
+};
+
+#define ARM_MAILBOX_START               0xfffcf000
+#define ARM_MAILBOX_LENGTH              0x800
+
+/* New Registers in OMAP3.1 */
+
+#define TESTBLOCK_ID_START              0xfffed400
+#define TESTBLOCK_ID_LENGTH             0xff
+
+/* ID Returned by OMAP1510 */
+#define TBC_ID_VALUE                    0xB47002F
+
+#define SPACE_LENGTH                    0x2000
+#define API_CLKM_DPLL_DMA               0xfffec000
+#define ARM_INTERRUPT_OFFSET            0xb00
+
+#define BIOS24XX
+
+#define L4_PERIPHERAL_NULL          0x0
+#define DSPVA_PERIPHERAL_NULL       0x0
+
+#define MAX_LOCK_TLB_ENTRIES 15
+
+#define L4_PERIPHERAL_PRM        0x48306000    /* PRM L4 Peripheral */
+#define DSPVA_PERIPHERAL_PRM     0x1181e000
+#define L4_PERIPHERAL_SCM        0x48002000    /* SCM L4 Peripheral */
+#define DSPVA_PERIPHERAL_SCM     0x1181f000
+#define L4_PERIPHERAL_MMU        0x5D000000    /* MMU L4 Peripheral */
+#define DSPVA_PERIPHERAL_MMU     0x11820000
+#define L4_PERIPHERAL_CM        0x48004000     /* Core L4, Clock Management */
+#define DSPVA_PERIPHERAL_CM     0x1181c000
+#define L4_PERIPHERAL_PER        0x48005000    /* PER */
+#define DSPVA_PERIPHERAL_PER     0x1181d000
+
+#define L4_PERIPHERAL_GPIO1       0x48310000
+#define DSPVA_PERIPHERAL_GPIO1    0x11809000
+#define L4_PERIPHERAL_GPIO2       0x49050000
+#define DSPVA_PERIPHERAL_GPIO2    0x1180a000
+#define L4_PERIPHERAL_GPIO3       0x49052000
+#define DSPVA_PERIPHERAL_GPIO3    0x1180b000
+#define L4_PERIPHERAL_GPIO4       0x49054000
+#define DSPVA_PERIPHERAL_GPIO4    0x1180c000
+#define L4_PERIPHERAL_GPIO5       0x49056000
+#define DSPVA_PERIPHERAL_GPIO5    0x1180d000
+
+#define L4_PERIPHERAL_IVA2WDT      0x49030000
+#define DSPVA_PERIPHERAL_IVA2WDT   0x1180e000
+
+#define L4_PERIPHERAL_DISPLAY     0x48050000
+#define DSPVA_PERIPHERAL_DISPLAY  0x1180f000
+
+#define L4_PERIPHERAL_SSI         0x48058000
+#define DSPVA_PERIPHERAL_SSI      0x11804000
+#define L4_PERIPHERAL_GDD         0x48059000
+#define DSPVA_PERIPHERAL_GDD      0x11805000
+#define L4_PERIPHERAL_SS1         0x4805a000
+#define DSPVA_PERIPHERAL_SS1      0x11806000
+#define L4_PERIPHERAL_SS2         0x4805b000
+#define DSPVA_PERIPHERAL_SS2      0x11807000
+
+#define L4_PERIPHERAL_CAMERA      0x480BC000
+#define DSPVA_PERIPHERAL_CAMERA   0x11819000
+
+#define L4_PERIPHERAL_SDMA        0x48056000
+#define DSPVA_PERIPHERAL_SDMA     0x11810000   /* 0x1181d000 conflict w/ PER */
+
+#define L4_PERIPHERAL_UART1             0x4806a000
+#define DSPVA_PERIPHERAL_UART1          0x11811000
+#define L4_PERIPHERAL_UART2             0x4806c000
+#define DSPVA_PERIPHERAL_UART2          0x11812000
+#define L4_PERIPHERAL_UART3             0x49020000
+#define DSPVA_PERIPHERAL_UART3    0x11813000
+
+#define L4_PERIPHERAL_MCBSP1      0x48074000
+#define DSPVA_PERIPHERAL_MCBSP1   0x11814000
+#define L4_PERIPHERAL_MCBSP2      0x49022000
+#define DSPVA_PERIPHERAL_MCBSP2   0x11815000
+#define L4_PERIPHERAL_MCBSP3      0x49024000
+#define DSPVA_PERIPHERAL_MCBSP3   0x11816000
+#define L4_PERIPHERAL_MCBSP4      0x49026000
+#define DSPVA_PERIPHERAL_MCBSP4   0x11817000
+#define L4_PERIPHERAL_MCBSP5      0x48096000
+#define DSPVA_PERIPHERAL_MCBSP5   0x11818000
+
+#define L4_PERIPHERAL_GPTIMER5    0x49038000
+#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000
+#define L4_PERIPHERAL_GPTIMER6    0x4903a000
+#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000
+#define L4_PERIPHERAL_GPTIMER7    0x4903c000
+#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000
+#define L4_PERIPHERAL_GPTIMER8    0x4903e000
+#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000
+
+#define L4_PERIPHERAL_SPI1      0x48098000
+#define DSPVA_PERIPHERAL_SPI1   0x1181a000
+#define L4_PERIPHERAL_SPI2      0x4809a000
+#define DSPVA_PERIPHERAL_SPI2   0x1181b000
+
+#define L4_PERIPHERAL_MBOX        0x48094000
+#define DSPVA_PERIPHERAL_MBOX     0x11808000
+
+#define PM_GRPSEL_BASE                         0x48307000
+#define DSPVA_GRPSEL_BASE              0x11821000
+
+#define L4_PERIPHERAL_SIDETONE_MCBSP2        0x49028000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
+#define L4_PERIPHERAL_SIDETONE_MCBSP3        0x4902a000
+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000
+
+/* define a static array with L4 mappings */
+static const struct map_l4_peripheral l4_peripheral_table[] = {
+       {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX},
+       {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM},
+       {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU},
+       {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5},
+       {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6},
+       {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7},
+       {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8},
+       {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1},
+       {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2},
+       {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3},
+       {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4},
+       {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5},
+       {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT},
+       {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY},
+       {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI},
+       {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD},
+       {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1},
+       {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2},
+       {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1},
+       {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2},
+       {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3},
+       {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1},
+       {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2},
+       {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3},
+       {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4},
+       {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5},
+       {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA},
+       {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1},
+       {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2},
+       {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM},
+       {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM},
+       {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER},
+       {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE},
+       {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2},
+       {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3},
+       {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL}
+};
+
+/*
+ *   15         10                  0
+ *   ---------------------------------
+ *  |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i|
+ *  ---------------------------------
+ *  |  (class)  | (module specific) |
+ *
+ *  where  c -> External Clock Command: Clk & Autoidle Disable/Enable
+ *  i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3
+ */
+
+/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */
+#define MBX_PM_CLK_IDMASK   0x7F
+
+/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */
+#define MBX_PM_CLK_CMDSHIFT 7
+
+/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */
+#define MBX_PM_CLK_CMDMASK 7
+
+/* MBX_CORE1_RESOURCES: CORE 1 clock resources. */
+#define MBX_CORE1_RESOURCES 7
+
+/* MBX_CORE2_RESOURCES: CORE 2 clock resources. */
+#define MBX_CORE2_RESOURCES 1
+
+/* MBX_PM_MAX_RESOURCES: TOTAL clock resources. */
+#define MBX_PM_MAX_RESOURCES 11
+
+/*  Power Management Commands */
+#define BPWR_DISABLE_CLOCK     0
+#define BPWR_ENABLE_CLOCK      1
+
+/* OMAP242x specific resources */
+enum bpwr_ext_clock_id {
+       BPWR_GP_TIMER5 = 0x10,
+       BPWR_GP_TIMER6,
+       BPWR_GP_TIMER7,
+       BPWR_GP_TIMER8,
+       BPWR_WD_TIMER3,
+       BPWR_MCBSP1,
+       BPWR_MCBSP2,
+       BPWR_MCBSP3,
+       BPWR_MCBSP4,
+       BPWR_MCBSP5,
+       BPWR_SSI = 0x20
+};
+
+static const u32 bpwr_clkid[] = {
+       (u32) BPWR_GP_TIMER5,
+       (u32) BPWR_GP_TIMER6,
+       (u32) BPWR_GP_TIMER7,
+       (u32) BPWR_GP_TIMER8,
+       (u32) BPWR_WD_TIMER3,
+       (u32) BPWR_MCBSP1,
+       (u32) BPWR_MCBSP2,
+       (u32) BPWR_MCBSP3,
+       (u32) BPWR_MCBSP4,
+       (u32) BPWR_MCBSP5,
+       (u32) BPWR_SSI
+};
+
+struct bpwr_clk_t {
+       u32 clk_id;
+       enum dsp_clk_id clk;
+};
+
+static const struct bpwr_clk_t bpwr_clks[] = {
+       {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5},
+       {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6},
+       {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7},
+       {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8},
+       {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3},
+       {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1},
+       {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2},
+       {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3},
+       {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4},
+       {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5},
+       {(u32) BPWR_SSI, DSP_CLK_SSI}
+};
+
+/* Interrupt Register Offsets */
+#define INTH_IT_REG_OFFSET              0x00   /* Interrupt register offset */
+#define INTH_MASK_IT_REG_OFFSET         0x04   /* Mask Interrupt reg offset */
+
+#define   DSP_MAILBOX1_INT              10
+/*
+ *  Bit definition of Interrupt Level Registers
+ */
+
+/* Mail Box defines */
+#define MB_ARM2DSP1_REG_OFFSET          0x00
+
+#define MB_ARM2DSP1B_REG_OFFSET         0x04
+
+#define MB_DSP2ARM1B_REG_OFFSET         0x0C
+
+#define MB_ARM2DSP1_FLAG_REG_OFFSET     0x18
+
+#define MB_ARM2DSP_FLAG                 0x0001
+
+#define MBOX_ARM2DSP HW_MBOX_ID0
+#define MBOX_DSP2ARM HW_MBOX_ID1
+#define MBOX_ARM HW_MBOX_U0_ARM
+#define MBOX_DSP HW_MBOX_U1_DSP1
+
+#define ENABLE                          true
+#define DISABLE                         false
+
+#define HIGH_LEVEL                      true
+#define LOW_LEVEL                       false
+
+/* Macros */
+#define REG16(A)    (*(reg_uword16 *)(A))
+
+#define CLEAR_BIT(reg, mask)             (reg &= ~mask)
+#define SET_BIT(reg, mask)               (reg |= mask)
+
+#define SET_GROUP_BITS16(reg, position, width, value) \
+       do { \
+               reg &= ~((0xFFFF >> (16 - (width))) << (position)); \
+               reg |= (((value) & (0xFFFF >> (16 - (width)))) << (position)); \
+       } while (0)
+
+#define CLEAR_BIT_INDEX(reg, index)   (reg &= ~(1 << (index)))
+
+/* This Bridge driver's device context: */
+struct bridge_dev_context {
+       struct dev_object *hdev_obj;    /* Handle to Bridge device object. */
+       u32 dw_dsp_base_addr;   /* Arm's API to DSP virt base addr */
+       /*
+        * DSP External memory prog address as seen virtually by the OS on
+        * the host side.
+        */
+       u32 dw_dsp_ext_base_addr;       /* See the comment above */
+       u32 dw_api_reg_base;    /* API mem map'd registers */
+       void __iomem *dw_dsp_mmu_base;  /* DSP MMU Mapped registers */
+       u32 dw_api_clk_base;    /* CLK Registers */
+       u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
+       u32 dw_public_rhea;     /* Pub Rhea */
+       u32 dw_int_addr;        /* MB INTR reg */
+       u32 dw_tc_endianism;    /* TC Endianism register */
+       u32 dw_test_base;       /* DSP MMU Mapped registers */
+       u32 dw_self_loop;       /* Pointer to the selfloop */
+       u32 dw_dsp_start_add;   /* API Boot vector */
+       u32 dw_internal_size;   /* Internal memory size */
+
+       struct omap_mbox *mbox;         /* Mail box handle */
+
+       struct cfg_hostres *resources;  /* Host Resources */
+
+       /*
+        * Processor specific info is set when prog loaded and read from DCD.
+        * [See bridge_dev_ctrl()]  PROC info contains DSP-MMU TLB entries.
+        */
+       /* DMMU TLB entries */
+       struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
+       u32 dw_brd_state;       /* Last known board state. */
+       u32 ul_int_mask;        /* int mask */
+       u16 io_base;            /* Board I/O base */
+       u32 num_tlb_entries;    /* DSP MMU TLB entry counter */
+       u32 fixed_tlb_entries;  /* Fixed DSPMMU TLB entry count */
+
+       /* TC Settings */
+       bool tc_word_swap_on;   /* Traffic Controller Word Swap */
+       struct pg_table_attrs *pt_attrs;
+       u32 dsp_per_clks;
+};
+
+/*
+ * If dsp_debug is true, do not branch to the DSP entry
+ * point and wait for DSP to boot.
+ */
+extern s32 dsp_debug;
+
+/*
+ *  ======== sm_interrupt_dsp ========
+ *  Purpose:
+ *      Set interrupt value & send an interrupt to the DSP processor(s).
+ *      This is typically used when mailbox interrupt mechanisms allow data
+ *      to be associated with the interrupt, such as for OMAP's CMD/DATA regs.
+ *  Parameters:
+ *      dev_context:    Handle to Bridge driver defined device info.
+ *      mb_val:         Value associated with the interrupt (e.g. mailbox value).
+ *  Returns:
+ *      0:        Interrupt sent;
+ *      else:           Unable to send interrupt.
+ *  Requires:
+ *  Ensures:
+ */
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val);
+
+#endif /* _TIOMAP_ */
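
Per the bit diagram earlier in this header, a power-management mailbox word
carries a 3-bit clock command above a 7-bit clock id. A minimal sketch of
packing one, using only the masks and shifts defined above (helper name
illustrative, not this commit's code):

	/* Sketch, not part of this commit. */
	static inline u16 mbx_pm_clk_msg(u16 cmd, u16 clk_id)
	{
		return ((cmd & MBX_PM_CLK_CMDMASK) << MBX_PM_CLK_CMDSHIFT) |
		       (clk_id & MBX_PM_CLK_IDMASK);
	}

	/* e.g. ask the DSP side to enable GPT5's external clock:
	 *   u16 msg = mbx_pm_clk_msg(BPWR_ENABLE_CLOCK, BPWR_GP_TIMER5);
	 */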
diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
new file mode 100644 (file)
index 0000000..b9a3453
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * _tiomap_pwr.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions and types for the DSP wake/sleep routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_PWR_
+#define _TIOMAP_PWR_
+
+#ifdef CONFIG_PM
+extern s32 dsp_test_sleepstate;
+#endif
+
+extern struct mailbox_context mboxsetting;
+
+/*
+ * ======== wake_dsp =========
+ * Wakes up the DSP from DeepSleep
+ */
+extern int wake_dsp(struct bridge_dev_context *dev_context,
+                                                       IN void *pargs);
+
+/*
+ * ======== sleep_dsp =========
+ * Places the DSP in DeepSleep.
+ */
+extern int sleep_dsp(struct bridge_dev_context *dev_context,
+                           IN u32 dw_cmd, IN void *pargs);
+/*
+ *  ========interrupt_dsp========
+ *       Sends an interrupt to DSP unconditionally.
+ */
+extern void interrupt_dsp(struct bridge_dev_context *dev_context,
+                                                       IN u16 mb_val);
+
+/*
+ * ======== dsp_peripheral_clk_ctrl =========
+ * Handles DSP peripheral clock enable/disable requests.
+ */
+extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
+                                       *dev_context, IN void *pargs);
+/*
+ *  ======== handle_hibernation_from_dsp ========
+ *     Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
+/*
+ *  ======== post_scale_dsp ========
+ *     Handle Post Scale notification to DSP
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+                                                       IN void *pargs);
+/*
+ *  ======== pre_scale_dsp ========
+ *     Handle Pre Scale notification to DSP
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context,
+                                                       IN void *pargs);
+/*
+ *  ======== handle_constraints_set ========
+ *     Handle constraints request from DSP
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+                                 IN void *pargs);
+
+/*
+ *  ======== dsp_clk_wakeup_event_ctrl ========
+ *     This function sets the group selection bits used while
+ *     enabling/disabling the given clock's wakeup event.
+ */
+void dsp_clk_wakeup_event_ctrl(u32 ClkId, bool enable);
+
+#endif /* _TIOMAP_PWR_ */
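
A minimal sketch of how a power-management path might call the wake/sleep
entry points declared above; the wrapper names are illustrative, and
PWR_DEEPSLEEP is assumed to be the sleep command defined in the bridge's
pwr headers:

	/* Sketch, not part of this commit. */
	static int bridge_suspend(struct bridge_dev_context *dev_ctxt)
	{
		return sleep_dsp(dev_ctxt, PWR_DEEPSLEEP, NULL);
	}

	static int bridge_resume(struct bridge_dev_context *dev_ctxt)
	{
		return wake_dsp(dev_ctxt, NULL);
	}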
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
new file mode 100644 (file)
index 0000000..714b6f7
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+ * chnl_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge driver channel module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ *      The lower edge functions must be implemented by the Bridge driver
+ *      writer, and are declared in chnl_sm.h.
+ *
+ *      Care is taken in this code to prevent simultaneous access to channel
+ *      queues from
+ *      1. Threads.
+ *      2. io_dpc(), scheduled from the io_isr() as an event.
+ *
+ *      This is done primarily by:
+ *      - Semaphores.
+ *      - State flags in the channel object; and
+ *      - ensuring the IO_Dispatch() routine, which is called from both
+ *        CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
+ *
+ *  Channel Invariant:
+ *      There is an important invariant condition which must be maintained per
+ *      channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ *      which may cause timeouts and/or failure of function sync_wait_on_event.
+ *      This invariant condition is:
+ *
+ *          LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
+ *      and
+ *          !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
+ */
+
+/*  ----------------------------------- OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include "_tiomap.h"
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/*  ----------------------------------- Define for This */
+#define USERMODE_ADDR   PAGE_OFFSET
+
+#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
+
+/*  ----------------------------------- Function Prototypes */
+static struct lst_list *create_chirp_list(u32 uChirps);
+
+static void free_chirp_list(struct lst_list *pList);
+
+static struct chnl_irp *make_new_chirp(void);
+
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+                                     OUT u32 *pdwChnl);
+
+/*
+ *  ======== bridge_chnl_add_io_req ========
+ *      Enqueue an I/O request for data transfer on a channel to the DSP.
+ *      The direction (mode) is specified in the channel object. Note the DSP
+ *      address is specified for channels opened in direct I/O mode.
+ */
+int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
+                              u32 byte_size, u32 buf_size,
+                              OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
+{
+       int status = 0;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+       struct chnl_irp *chnl_packet_obj = NULL;
+       struct bridge_dev_context *dev_ctxt;
+       struct dev_object *dev_obj;
+       u8 dw_state;
+       bool is_eos;
+       struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
+       u8 *host_sys_buf = NULL;
+       bool sched_dpc = false;
+       u16 mb_val = 0;
+
+       is_eos = (byte_size == 0);
+
+       /* Validate args */
+       if (!pHostBuf || !pchnl) {
+               status = -EFAULT;
+       } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+               status = -EPERM;
+       } else {
+               /*
+                * Check the channel state: only queue chirp if channel state
+                * allows it.
+                */
+               dw_state = pchnl->dw_state;
+               if (dw_state != CHNL_STATEREADY) {
+                       if (dw_state & CHNL_STATECANCEL)
+                               status = -ECANCELED;
+                       else if ((dw_state & CHNL_STATEEOS) &&
+                                CHNL_IS_OUTPUT(pchnl->chnl_mode))
+                               status = -EPIPE;
+                       else
+                               /* No other possible states left */
+                               DBC_ASSERT(0);
+               }
+       }
+
+       dev_obj = dev_get_first();
+       dev_get_bridge_context(dev_obj, &dev_ctxt);
+       if (!dev_ctxt)
+               status = -EFAULT;
+
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
+               if (!(pHostBuf < (void *)USERMODE_ADDR)) {
+                       host_sys_buf = pHostBuf;
+                       goto func_cont;
+               }
+               /* if addr in user mode, then copy to kernel space */
+               host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
+               if (host_sys_buf == NULL) {
+                       status = -ENOMEM;
+                       goto func_end;
+               }
+               if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+                       status = copy_from_user(host_sys_buf, pHostBuf,
+                                               buf_size);
+                       if (status) {
+                               kfree(host_sys_buf);
+                               host_sys_buf = NULL;
+                               status = -EFAULT;
+                               goto func_end;
+                       }
+               }
+       }
+func_cont:
+       /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
+        * channels. DPCCS is held to avoid race conditions with PCPY channels.
+        * If DPC is scheduled in process context (iosm_schedule) and any
+        * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
+        * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
+       spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+       omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+       if (pchnl->chnl_type == CHNL_PCPY) {
+               /* This is a processor-copy channel. */
+               if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+                       /* Check buffer size on output channels for fit. */
+                       if (byte_size >
+                           io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
+                               status = -EINVAL;
+
+               }
+       }
+       if (DSP_SUCCEEDED(status)) {
+               /* Get a free chirp: */
+               chnl_packet_obj =
+                   (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
+               if (chnl_packet_obj == NULL)
+                       status = -EIO;
+
+       }
+       if (DSP_SUCCEEDED(status)) {
+               /* Enqueue the chirp on the chnl's IORequest queue: */
+               chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
+                   pHostBuf;
+               if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
+                       chnl_packet_obj->host_sys_buf = host_sys_buf;
+
+               /*
+                * Note: for dma chans dw_dsp_addr contains dsp address
+                * of SM buffer.
+                */
+               DBC_ASSERT(chnl_mgr_obj->word_size != 0);
+               /* DSP address */
+               chnl_packet_obj->dsp_tx_addr =
+                   dw_dsp_addr / chnl_mgr_obj->word_size;
+               chnl_packet_obj->byte_size = byte_size;
+               chnl_packet_obj->buf_size = buf_size;
+               /* Only valid for output channel */
+               chnl_packet_obj->dw_arg = dw_arg;
+               chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
+                                          CHNL_IOCSTATCOMPLETE);
+               lst_put_tail(pchnl->pio_requests,
+                            (struct list_head *)chnl_packet_obj);
+               pchnl->cio_reqs++;
+               DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
+               /*
+                * If end of stream, update the channel state to prevent
+                * more IOR's.
+                */
+               if (is_eos)
+                       pchnl->dw_state |= CHNL_STATEEOS;
+
+               /* Legacy DSM Processor-Copy */
+               DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
+               /* Request IO from the DSP */
+               io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
+                               (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
+                                IO_OUTPUT), &mb_val);
+               sched_dpc = true;
+
+       }
+       omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+       spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+       if (mb_val != 0)
+               io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);
+
+       /* Schedule a DPC, to do the actual data transfer */
+       if (sched_dpc)
+               iosm_schedule(chnl_mgr_obj->hio_mgr);
+
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_cancel_io ========
+ *      Return all I/O requests to the client which have not yet been
+ *      transferred.  The channel's I/O completion object is
+ *      signalled, and all the I/O requests are queued as IOC's, with the
+ *      status field set to CHNL_IOCSTATCANCEL.
+ *      This call is typically used in abort situations, and is a prelude to
+ *      chnl_close().
+ */
+int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
+{
+       int status = 0;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+       u32 chnl_id = -1;
+       s8 chnl_mode;
+       struct chnl_irp *chnl_packet_obj;
+       struct chnl_mgr *chnl_mgr_obj = NULL;
+
+       /* Check args: */
+       if (pchnl && pchnl->chnl_mgr_obj) {
+               chnl_id = pchnl->chnl_id;
+               chnl_mode = pchnl->chnl_mode;
+               chnl_mgr_obj = pchnl->chnl_mgr_obj;
+       } else {
+               status = -EFAULT;
+       }
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       /*  Mark this channel as cancelled, to prevent further IORequests or
+        *  dispatching. */
+       spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+       pchnl->dw_state |= CHNL_STATECANCEL;
+       if (LST_IS_EMPTY(pchnl->pio_requests))
+               goto func_cont;
+
+       if (pchnl->chnl_type == CHNL_PCPY) {
+               /* Indicate we have no more buffers available for transfer: */
+               if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
+                       io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
+               } else {
+                       /* Record that we no longer have output buffers
+                        * available: */
+                       chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+               }
+       }
+       /* Move all IOR's to IOC queue: */
+       while (!LST_IS_EMPTY(pchnl->pio_requests)) {
+               chnl_packet_obj =
+                   (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+               if (chnl_packet_obj) {
+                       chnl_packet_obj->byte_size = 0;
+                       chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
+                       lst_put_tail(pchnl->pio_completions,
+                                    (struct list_head *)chnl_packet_obj);
+                       pchnl->cio_cs++;
+                       pchnl->cio_reqs--;
+                       DBC_ASSERT(pchnl->cio_reqs >= 0);
+               }
+       }
+func_cont:
+       spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_close ========
+ *  Purpose:
+ *      Ensures all pending I/O on this channel is cancelled, discards all
+ *      queued I/O completion notifications, then frees the resources allocated
+ *      for this channel, and makes the corresponding logical channel id
+ *      available for subsequent use.
+ */
+int bridge_chnl_close(struct chnl_object *chnl_obj)
+{
+       int status;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+
+       /* Check args: */
+       if (!pchnl) {
+               status = -EFAULT;
+               goto func_cont;
+       }
+       {
+               /* Cancel IO: this ensures no further IO requests or
+                * notifications. */
+               status = bridge_chnl_cancel_io(chnl_obj);
+       }
+func_cont:
+       if (DSP_SUCCEEDED(status)) {
+               /* Assert I/O on this channel is now cancelled: Protects
+                * from io_dpc. */
+               DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
+               /* Invalidate channel object: Protects from
+                * CHNL_GetIOCompletion(). */
+               /* Free the slot in the channel manager: */
+               pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
+               spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+               pchnl->chnl_mgr_obj->open_channels -= 1;
+               spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+               if (pchnl->ntfy_obj) {
+                       ntfy_delete(pchnl->ntfy_obj);
+                       kfree(pchnl->ntfy_obj);
+                       pchnl->ntfy_obj = NULL;
+               }
+               /* Reset channel event: (NOTE: user_event freed in user
+                * context.). */
+               if (pchnl->sync_event) {
+                       sync_reset_event(pchnl->sync_event);
+                       kfree(pchnl->sync_event);
+                       pchnl->sync_event = NULL;
+               }
+               /* Free I/O request and I/O completion queues: */
+               if (pchnl->pio_completions) {
+                       free_chirp_list(pchnl->pio_completions);
+                       pchnl->pio_completions = NULL;
+                       pchnl->cio_cs = 0;
+               }
+               if (pchnl->pio_requests) {
+                       free_chirp_list(pchnl->pio_requests);
+                       pchnl->pio_requests = NULL;
+                       pchnl->cio_reqs = 0;
+               }
+               if (pchnl->free_packets_list) {
+                       free_chirp_list(pchnl->free_packets_list);
+                       pchnl->free_packets_list = NULL;
+               }
+               /* Release channel object. */
+               kfree(pchnl);
+               pchnl = NULL;
+       }
+       DBC_ENSURE(DSP_FAILED(status) || !pchnl);
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_create ========
+ *      Create a channel manager object, responsible for opening new channels
+ *      and closing old ones for a given board.
+ */
+int bridge_chnl_create(OUT struct chnl_mgr **phChnlMgr,
+                             struct dev_object *hdev_obj,
+                             IN CONST struct chnl_mgrattrs *pMgrAttrs)
+{
+       int status = 0;
+       struct chnl_mgr *chnl_mgr_obj = NULL;
+       u8 max_channels;
+
+       /* Check DBC requirements: */
+       DBC_REQUIRE(phChnlMgr != NULL);
+       DBC_REQUIRE(pMgrAttrs != NULL);
+       DBC_REQUIRE(pMgrAttrs->max_channels > 0);
+       DBC_REQUIRE(pMgrAttrs->max_channels <= CHNL_MAXCHANNELS);
+       DBC_REQUIRE(pMgrAttrs->word_size != 0);
+
+       /* Allocate channel manager object */
+       chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
+       if (chnl_mgr_obj) {
+               /*
+                * The max_channels attr must equal the # of supported chnls for
+                * each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
+                *      pMgrAttrs->max_channels = CHNL_MAXCHANNELS =
+                *                       DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
+                */
+               DBC_ASSERT(pMgrAttrs->max_channels == CHNL_MAXCHANNELS);
+               max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
+               /* Create array of channels */
+               chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
+                                               * max_channels, GFP_KERNEL);
+               if (chnl_mgr_obj->ap_channel) {
+                       /* Initialize chnl_mgr object */
+                       chnl_mgr_obj->dw_type = CHNL_TYPESM;
+                       chnl_mgr_obj->word_size = pMgrAttrs->word_size;
+                       /* Total # chnls supported */
+                       chnl_mgr_obj->max_channels = max_channels;
+                       chnl_mgr_obj->open_channels = 0;
+                       chnl_mgr_obj->dw_output_mask = 0;
+                       chnl_mgr_obj->dw_last_output = 0;
+                       chnl_mgr_obj->hdev_obj = hdev_obj;
+                       if (DSP_SUCCEEDED(status))
+                               spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
+               } else {
+                       status = -ENOMEM;
+               }
+       } else {
+               status = -ENOMEM;
+       }
+
+       if (DSP_FAILED(status)) {
+               bridge_chnl_destroy(chnl_mgr_obj);
+               *phChnlMgr = NULL;
+       } else {
+               /* Return channel manager object to caller... */
+               *phChnlMgr = chnl_mgr_obj;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_destroy ========
+ *  Purpose:
+ *      Close all open channels, and destroy the channel manager.
+ */
+int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
+{
+       int status = 0;
+       struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+       u32 chnl_id;
+
+       if (hchnl_mgr) {
+               /* Close all open channels: */
+               for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
+                    chnl_id++) {
+                       status =
+                           bridge_chnl_close(chnl_mgr_obj->ap_channel
+                                             [chnl_id]);
+                       if (DSP_FAILED(status))
+                               dev_dbg(bridge, "%s: Error status 0x%x\n",
+                                       __func__, status);
+               }
+
+               /* Free channel manager object: */
+               kfree(chnl_mgr_obj->ap_channel);
+
+               /* Set hchnl_mgr to NULL in device object. */
+               dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
+               /* Free this Chnl Mgr object: */
+               kfree(hchnl_mgr);
+       } else {
+               status = -EFAULT;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_flush_io ========
+ *  Purpose:
+ *      Flushes all the outstanding data requests on a channel.
+ */
+int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 dwTimeOut)
+{
+       int status = 0;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+       s8 chnl_mode = -1;
+       struct chnl_mgr *chnl_mgr_obj;
+       struct chnl_ioc chnl_ioc_obj;
+       /* Check args: */
+       if (pchnl) {
+               if ((dwTimeOut == CHNL_IOCNOWAIT)
+                   && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+                       status = -EINVAL;
+               } else {
+                       chnl_mode = pchnl->chnl_mode;
+                       chnl_mgr_obj = pchnl->chnl_mgr_obj;
+               }
+       } else {
+               status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status)) {
+               /* Note: Currently, if another thread continues to add IO
+                * requests to this channel, this function will continue to
+                * flush all such queued IO requests. */
+               if (CHNL_IS_OUTPUT(chnl_mode)
+                   && (pchnl->chnl_type == CHNL_PCPY)) {
+                       /* Wait for IO completions, up to the specified
+                        * timeout: */
+                       while (!LST_IS_EMPTY(pchnl->pio_requests) &&
+                              DSP_SUCCEEDED(status)) {
+                               status = bridge_chnl_get_ioc(chnl_obj,
+                                               dwTimeOut, &chnl_ioc_obj);
+                               if (DSP_FAILED(status))
+                                       continue;
+
+                               if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
+                                       status = -ETIMEDOUT;
+
+                       }
+               } else {
+                       status = bridge_chnl_cancel_io(chnl_obj);
+                       /* Now, leave the channel in the ready state: */
+                       pchnl->dw_state &= ~CHNL_STATECANCEL;
+               }
+       }
+       DBC_ENSURE(DSP_FAILED(status) || LST_IS_EMPTY(pchnl->pio_requests));
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_get_info ========
+ *  Purpose:
+ *      Retrieve information related to a channel.
+ */
+int bridge_chnl_get_info(struct chnl_object *chnl_obj,
+                            OUT struct chnl_info *pInfo)
+{
+       int status = 0;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+       if (pInfo != NULL) {
+               if (pchnl) {
+                       /* Return the requested information: */
+                       pInfo->hchnl_mgr = pchnl->chnl_mgr_obj;
+                       pInfo->event_obj = pchnl->user_event;
+                       pInfo->cnhl_id = pchnl->chnl_id;
+                       pInfo->dw_mode = pchnl->chnl_mode;
+                       pInfo->bytes_tx = pchnl->bytes_moved;
+                       pInfo->process = pchnl->process;
+                       pInfo->sync_event = pchnl->sync_event;
+                       pInfo->cio_cs = pchnl->cio_cs;
+                       pInfo->cio_reqs = pchnl->cio_reqs;
+                       pInfo->dw_state = pchnl->dw_state;
+               } else {
+                       status = -EFAULT;
+               }
+       } else {
+               status = -EFAULT;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_get_ioc ========
+ *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
+ *      completion record, which contains information about the completed
+ *      I/O request.
+ *      Note: Ensures Channel Invariant (see notes above).
+ */
+int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
+                           OUT struct chnl_ioc *pIOC)
+{
+       int status = 0;
+       struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
+       struct chnl_irp *chnl_packet_obj;
+       int stat_sync;
+       bool dequeue_ioc = true;
+       struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
+       u8 *host_sys_buf = NULL;
+       struct bridge_dev_context *dev_ctxt;
+       struct dev_object *dev_obj;
+
+       /* Check args: */
+       if (!pIOC || !pchnl) {
+               status = -EFAULT;
+       } else if (dwTimeOut == CHNL_IOCNOWAIT) {
+               if (LST_IS_EMPTY(pchnl->pio_completions))
+                       status = -EREMOTEIO;
+
+       }
+
+       dev_obj = dev_get_first();
+       dev_get_bridge_context(dev_obj, &dev_ctxt);
+       if (!dev_ctxt)
+               status = -EFAULT;
+
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       ioc.status = CHNL_IOCSTATCOMPLETE;
+       if (dwTimeOut !=
+           CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
+               if (dwTimeOut == CHNL_IOCINFINITE)
+                       dwTimeOut = SYNC_INFINITE;
+
+               stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
+               if (stat_sync == -ETIME) {
+                       /* No response from DSP */
+                       ioc.status |= CHNL_IOCSTATTIMEOUT;
+                       dequeue_ioc = false;
+               } else if (stat_sync == -EPERM) {
+                       /* This can occur when the user mode thread is
+                        * aborted (^C), or when _VWIN32_WaitSingleObject()
+                        * fails due to unknown causes. */
+                       /* Even though Wait failed, there may be something in
+                        * the Q: */
+                       if (LST_IS_EMPTY(pchnl->pio_completions)) {
+                               ioc.status |= CHNL_IOCSTATCANCEL;
+                               dequeue_ioc = false;
+                       }
+               }
+       }
+       /* See comment in AddIOReq */
+       spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+       omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
+       if (dequeue_ioc) {
+               /* Dequeue IOC and set pIOC; */
+               DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
+               chnl_packet_obj =
+                   (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
+               /* Update pIOC from channel state and chirp: */
+               if (chnl_packet_obj) {
+                       pchnl->cio_cs--;
+                       /*  If this is a zero-copy channel, then set IOC's pbuf
+                        *  to the DSP's address. This DSP address will get
+                        *  translated to user's virtual addr later. */
+                       {
+                               host_sys_buf = chnl_packet_obj->host_sys_buf;
+                               ioc.pbuf = chnl_packet_obj->host_user_buf;
+                       }
+                       ioc.byte_size = chnl_packet_obj->byte_size;
+                       ioc.buf_size = chnl_packet_obj->buf_size;
+                       ioc.dw_arg = chnl_packet_obj->dw_arg;
+                       ioc.status |= chnl_packet_obj->status;
+                       /* Place the used chirp on the free list: */
+                       lst_put_tail(pchnl->free_packets_list,
+                                    (struct list_head *)chnl_packet_obj);
+               } else {
+                       ioc.pbuf = NULL;
+                       ioc.byte_size = 0;
+               }
+       } else {
+               ioc.pbuf = NULL;
+               ioc.byte_size = 0;
+               ioc.dw_arg = 0;
+               ioc.buf_size = 0;
+       }
+       /* Ensure invariant: If any IOC's are queued for this channel... */
+       if (!LST_IS_EMPTY(pchnl->pio_completions)) {
+               /*  Since DSPStream_Reclaim() does not take a timeout
+                *  parameter, we pass the stream's timeout value to
+                *  bridge_chnl_get_ioc. We cannot determine whether or not
+                *  we have waited in User mode. Since the stream's timeout
+                *  value may be non-zero, we still have to set the event.
+                *  Therefore, this optimization is taken out.
+                *
+                *  if (dwTimeOut == CHNL_IOCNOWAIT) {
+                *    ... ensure event is set..
+                *      sync_set_event(pchnl->sync_event);
+                *  } */
+               sync_set_event(pchnl->sync_event);
+       } else {
+               /* else, if list is empty, ensure event is reset. */
+               sync_reset_event(pchnl->sync_event);
+       }
+       omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
+       spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
+       if (dequeue_ioc
+           && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
+               if (!(ioc.pbuf < (void *)USERMODE_ADDR))
+                       goto func_cont;
+
+               /* If the addr is in user mode, then copy it */
+               if (!host_sys_buf || !ioc.pbuf) {
+                       status = -EFAULT;
+                       goto func_cont;
+               }
+               if (!CHNL_IS_INPUT(pchnl->chnl_mode))
+                       goto func_cont1;
+
+               /* host_user_buf */
+               status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
+               if (status) {
+                       if (current->flags & PF_EXITING)
+                               status = 0;
+               }
+               if (status)
+                       status = -EFAULT;
+func_cont1:
+               kfree(host_sys_buf);
+       }
+func_cont:
+       /* Update User's IOC block: */
+       *pIOC = ioc;
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_get_mgr_info ========
+ *      Retrieve information related to the channel manager.
+ */
+int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 uChnlID,
+                                OUT struct chnl_mgrinfo *pMgrInfo)
+{
+       int status = 0;
+       struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
+
+       if (pMgrInfo != NULL) {
+               if (uChnlID <= CHNL_MAXCHANNELS) {
+                       if (hchnl_mgr) {
+                               /* Return the requested information: */
+                               pMgrInfo->chnl_obj =
+                                   chnl_mgr_obj->ap_channel[uChnlID];
+                               pMgrInfo->open_channels =
+                                   chnl_mgr_obj->open_channels;
+                               pMgrInfo->dw_type = chnl_mgr_obj->dw_type;
+                               /* total # of chnls */
+                               pMgrInfo->max_channels =
+                                   chnl_mgr_obj->max_channels;
+                       } else {
+                               status = -EFAULT;
+                       }
+               } else {
+                       status = -ECHRNG;
+               }
+       } else {
+               status = -EFAULT;
+       }
+
+       return status;
+}
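+
+/*
+ * Usage sketch (illustrative only, not called by this driver): querying
+ * the manager, assuming hchnl_mgr was obtained via dev_get_chnl_mgr().
+ *
+ *   struct chnl_mgrinfo info;
+ *   if (!bridge_chnl_get_mgr_info(hchnl_mgr, 0, &info))
+ *           pr_info("chnls open: %d of %d\n", info.open_channels,
+ *                   info.max_channels);
+ */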
+
+/*
+ *  ======== bridge_chnl_idle ========
+ *      Idle a particular channel: for an output channel with fFlush false,
+ *      wait for pending output to drain; otherwise cancel any queued I/O,
+ *      clear the byte count and return the channel to the ready state.
+ */
+int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 dwTimeOut,
+                           bool fFlush)
+{
+       s8 chnl_mode;
+       struct chnl_mgr *chnl_mgr_obj;
+       int status = 0;
+
+       DBC_REQUIRE(chnl_obj);
+
+       chnl_mode = chnl_obj->chnl_mode;
+       chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
+
+       if (CHNL_IS_OUTPUT(chnl_mode) && !fFlush) {
+               /* Wait for IO completions, up to the specified timeout: */
+               status = bridge_chnl_flush_io(chnl_obj, dwTimeOut);
+       } else {
+               status = bridge_chnl_cancel_io(chnl_obj);
+
+               /* Reset the byte count and put channel back in ready state. */
+               chnl_obj->bytes_moved = 0;
+               chnl_obj->dw_state &= ~CHNL_STATECANCEL;
+       }
+
+       return status;
+}
+
+/*
+ *  ======== bridge_chnl_open ========
+ *      Open a new half-duplex channel to the DSP board.
+ */
+int bridge_chnl_open(OUT struct chnl_object **phChnl,
+                           struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
+                           u32 uChnlId, CONST IN struct chnl_attr *pattrs)
+{
+       int status = 0;
+       struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
+       struct chnl_object *pchnl = NULL;
+       struct sync_object *sync_event = NULL;
+       /* Ensure DBC requirements: */
+       DBC_REQUIRE(phChnl != NULL);
+       DBC_REQUIRE(pattrs != NULL);
+       DBC_REQUIRE(hchnl_mgr != NULL);
+       *phChnl = NULL;
+       /* Validate Args: */
+       if (pattrs->uio_reqs == 0) {
+               status = -EINVAL;
+       } else {
+               if (!hchnl_mgr) {
+                       status = -EFAULT;
+               } else {
+                       if (uChnlId != CHNL_PICKFREE) {
+                               if (uChnlId >= chnl_mgr_obj->max_channels)
+                                       status = -ECHRNG;
+                               else if (chnl_mgr_obj->ap_channel[uChnlId] !=
+                                        NULL)
+                                       status = -EALREADY;
+                       } else {
+                               /* Check for free channel */
+                               status =
+                                   search_free_channel(chnl_mgr_obj, &uChnlId);
+                       }
+               }
+       }
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       DBC_ASSERT(uChnlId < chnl_mgr_obj->max_channels);
+       /* Create channel object: */
+       pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
+       if (!pchnl) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+       /* Protect queues from io_dpc: */
+       pchnl->dw_state = CHNL_STATECANCEL;
+       /* Allocate initial IOR and IOC queues: */
+       pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
+       pchnl->pio_requests = create_chirp_list(0);
+       pchnl->pio_completions = create_chirp_list(0);
+       pchnl->chnl_packets = pattrs->uio_reqs;
+       pchnl->cio_cs = 0;
+       pchnl->cio_reqs = 0;
+       sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+       if (sync_event)
+               sync_init_event(sync_event);
+       else
+               status = -ENOMEM;
+
+       if (DSP_SUCCEEDED(status)) {
+               pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+                                                       GFP_KERNEL);
+               if (pchnl->ntfy_obj)
+                       ntfy_init(pchnl->ntfy_obj);
+               else
+                       status = -ENOMEM;
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               if (pchnl->pio_completions && pchnl->pio_requests &&
+                   pchnl->free_packets_list) {
+                       /* Initialize CHNL object fields: */
+                       pchnl->chnl_mgr_obj = chnl_mgr_obj;
+                       pchnl->chnl_id = uChnlId;
+                       pchnl->chnl_mode = chnl_mode;
+                       pchnl->user_event = sync_event;
+                       pchnl->sync_event = sync_event;
+                       /* Get the process handle */
+                       pchnl->process = current->tgid;
+                       pchnl->pcb_arg = 0;
+                       pchnl->bytes_moved = 0;
+                       /* Default to proc-copy */
+                       pchnl->chnl_type = CHNL_PCPY;
+               } else {
+                       status = -ENOMEM;
+               }
+       }
+
+       if (DSP_FAILED(status)) {
+               /* Free memory */
+               if (pchnl->pio_completions) {
+                       free_chirp_list(pchnl->pio_completions);
+                       pchnl->pio_completions = NULL;
+                       pchnl->cio_cs = 0;
+               }
+               if (pchnl->pio_requests) {
+                       free_chirp_list(pchnl->pio_requests);
+                       pchnl->pio_requests = NULL;
+               }
+               if (pchnl->free_packets_list) {
+                       free_chirp_list(pchnl->free_packets_list);
+                       pchnl->free_packets_list = NULL;
+               }
+               kfree(sync_event);
+               sync_event = NULL;
+
+               if (pchnl->ntfy_obj) {
+                       ntfy_delete(pchnl->ntfy_obj);
+                       kfree(pchnl->ntfy_obj);
+                       pchnl->ntfy_obj = NULL;
+               }
+               kfree(pchnl);
+       } else {
+               /* Insert channel object in channel manager: */
+               chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
+               spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+               chnl_mgr_obj->open_channels++;
+               spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
+               /* Return result... */
+               pchnl->dw_state = CHNL_STATEREADY;
+               *phChnl = pchnl;
+       }
+func_end:
+       DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*phChnl == NULL));
+       return status;
+}
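+
+/*
+ * Minimal open/teardown sketch (illustrative; CHNL_MODETODSP and
+ * bridge_chnl_close() are assumed from the bridge channel interface
+ * headers, everything else is defined in this file):
+ *
+ *   struct chnl_object *chnl;
+ *   struct chnl_attr attrs = { .uio_reqs = 16 };
+ *   int err = bridge_chnl_open(&chnl, hchnl_mgr, CHNL_MODETODSP,
+ *                              CHNL_PICKFREE, &attrs);
+ *   if (!err)
+ *           err = bridge_chnl_close(chnl);
+ */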
+
+/*
+ *  ======== bridge_chnl_register_notify ========
+ *      Registers for events on a particular channel.
+ */
+int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
+                                   u32 event_mask, u32 notify_type,
+                                   struct dsp_notification *hnotification)
+{
+       int status = 0;
+
+       DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
+
+       if (event_mask)
+               status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
+                                               event_mask, notify_type);
+       else
+               status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
+
+       return status;
+}
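+
+/*
+ * Registration sketch (illustrative; DSP_SIGNALEVENT is assumed from the
+ * dspbridge notification definitions). Passing event_mask == 0
+ * unregisters the notification object again.
+ *
+ *   struct dsp_notification note = { };
+ *   int err = bridge_chnl_register_notify(chnl, DSP_STREAMIOCOMPLETION,
+ *                                         DSP_SIGNALEVENT, &note);
+ */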
+
+/*
+ *  ======== create_chirp_list ========
+ *  Purpose:
+ *      Initialize a queue of channel I/O Request/Completion packets.
+ *  Parameters:
+ *      uChirps:    Number of Chirps to allocate.
+ *  Returns:
+ *      Pointer to queue of IRPs, or NULL.
+ *  Requires:
+ *  Ensures:
+ */
+static struct lst_list *create_chirp_list(u32 uChirps)
+{
+       struct lst_list *chirp_list;
+       struct chnl_irp *chnl_packet_obj;
+       u32 i;
+
+       chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+
+       if (chirp_list) {
+               INIT_LIST_HEAD(&chirp_list->head);
+               /* Make N chirps and place on queue. */
+               for (i = 0; (i < uChirps)
+                    && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
+                       lst_put_tail(chirp_list,
+                                    (struct list_head *)chnl_packet_obj);
+               }
+
+               /* If we couldn't allocate all chirps, free those allocated: */
+               if (i != uChirps) {
+                       free_chirp_list(chirp_list);
+                       chirp_list = NULL;
+               }
+       }
+
+       return chirp_list;
+}
+
+/*
+ *  ======== free_chirp_list ========
+ *  Purpose:
+ *      Free the queue of Chirps.
+ */
+static void free_chirp_list(struct lst_list *chirp_list)
+{
+       DBC_REQUIRE(chirp_list != NULL);
+
+       while (!LST_IS_EMPTY(chirp_list))
+               kfree(lst_get_head(chirp_list));
+
+       kfree(chirp_list);
+}
+
+/*
+ *  ======== make_new_chirp ========
+ *      Allocate the memory for a new channel IRP.
+ */
+static struct chnl_irp *make_new_chirp(void)
+{
+       struct chnl_irp *chnl_packet_obj;
+
+       chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
+       if (chnl_packet_obj != NULL) {
+               /* lst_init_elem only resets the list's member values. */
+               lst_init_elem(&chnl_packet_obj->link);
+       }
+
+       return chnl_packet_obj;
+}
+
+/*
+ *  ======== search_free_channel ========
+ *      Search for a free channel slot in the array of channel pointers.
+ */
+static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
+                                     OUT u32 *pdwChnl)
+{
+       int status = -ENOSR;
+       u32 i;
+
+       DBC_REQUIRE(chnl_mgr_obj);
+
+       for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
+               if (chnl_mgr_obj->ap_channel[i] == NULL) {
+                       status = 0;
+                       *pdwChnl = i;
+                       break;
+               }
+       }
+
+       return status;
+}
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
new file mode 100644 (file)
index 0000000..abaa595
--- /dev/null
@@ -0,0 +1,421 @@
+/*
+ * dsp-clock.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Clock and Timer services.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <plat/dmtimer.h>
+#include <plat/mcbsp.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/dev.h>
+#include "_tiomap.h"
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/clk.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+
+#define OMAP_SSI_OFFSET                        0x58000
+#define OMAP_SSI_SIZE                  0x1000
+#define OMAP_SSI_SYSCONFIG_OFFSET      0x10
+
+#define SSI_AUTOIDLE                   (1 << 0)
+#define SSI_SIDLE_SMARTIDLE            (2 << 3)
+#define SSI_MIDLE_NOIDLE               (1 << 12)
+
+/* Clk types requested by the dsp */
+#define IVA2_CLK       0
+#define GPT_CLK                1
+#define WDT_CLK                2
+#define MCBSP_CLK      3
+#define SSI_CLK                4
+
+/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
+#define DMT_ID(id) ((id) + 4)
+
+/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
+#define MCBSP_ID(id) ((id) - 6)
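+
+/*
+ * For example, bridge GPT id 1 maps to DM timer 5 (DMT_ID(1) == 5), and
+ * bridge McBSP id 6 maps to OMAP McBSP 0 (MCBSP_ID(6) == 0).
+ */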
+
+static struct omap_dm_timer *timer[4];
+
+struct clk *iva2_clk;
+
+struct dsp_ssi {
+       struct clk *sst_fck;
+       struct clk *ssr_fck;
+       struct clk *ick;
+};
+
+static struct dsp_ssi ssi;
+
+static u32 dsp_clocks;
+
+static inline u32 is_dsp_clk_active(u32 clk, u8 id)
+{
+       return clk & (1 << id);
+}
+
+static inline void set_dsp_clk_active(u32 *clk, u8 id)
+{
+       *clk |= (1 << id);
+}
+
+static inline void set_dsp_clk_inactive(u32 *clk, u8 id)
+{
+       *clk &= ~(1 << id);
+}
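+
+/*
+ * Each dsp_clk_id owns one bit of the dsp_clocks mask: for id 2,
+ * set_dsp_clk_active() ORs in 1 << 2 == 0x4, is_dsp_clk_active() then
+ * returns non-zero for that id until set_dsp_clk_inactive() clears the
+ * bit again.
+ */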
+
+static s8 get_clk_type(u8 id)
+{
+       s8 type;
+
+       if (id == DSP_CLK_IVA2)
+               type = IVA2_CLK;
+       else if (id <= DSP_CLK_GPT8)
+               type = GPT_CLK;
+       else if (id == DSP_CLK_WDT3)
+               type = WDT_CLK;
+       else if (id <= DSP_CLK_MCBSP5)
+               type = MCBSP_CLK;
+       else if (id == DSP_CLK_SSI)
+               type = SSI_CLK;
+       else
+               type = -1;
+
+       return type;
+}
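+
+/*
+ * Note: the ladder above relies on the declaration order of
+ * enum dsp_clk_id (IVA2 first, GPTs up to DSP_CLK_GPT8, then WDT3,
+ * McBSPs up to DSP_CLK_MCBSP5, then DSP_CLK_SSI); reordering the enum
+ * would silently break this type mapping.
+ */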
+
+/*
+ *  ======== dsp_clk_exit ========
+ *  Purpose:
+ *      Cleanup CLK module.
+ */
+void dsp_clk_exit(void)
+{
+       dsp_clock_disable_all(dsp_clocks);
+
+       clk_put(iva2_clk);
+       clk_put(ssi.sst_fck);
+       clk_put(ssi.ssr_fck);
+       clk_put(ssi.ick);
+}
+
+/*
+ *  ======== dsp_clk_init ========
+ *  Purpose:
+ *      Initialize CLK module.
+ */
+void dsp_clk_init(void)
+{
+       static struct platform_device dspbridge_device;
+
+       dspbridge_device.dev.bus = &platform_bus_type;
+
+       iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
+       if (IS_ERR(iva2_clk))
+               dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+
+       ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
+       ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
+       ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
+
+       if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+               dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
+                                       ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+}
+
+#ifdef CONFIG_OMAP_MCBSP
+static void mcbsp_clk_prepare(bool flag, u8 id)
+{
+       struct cfg_hostres *resources;
+       struct dev_object *hdev_object = NULL;
+       struct bridge_dev_context *bridge_context = NULL;
+       u32 val;
+
+       hdev_object = (struct dev_object *)drv_get_first_dev_object();
+       if (!hdev_object)
+               return;
+
+       dev_get_bridge_context(hdev_object, &bridge_context);
+       if (!bridge_context)
+               return;
+
+       resources = bridge_context->resources;
+       if (!resources)
+               return;
+
+       if (flag) {
+               if (id == DSP_CLK_MCBSP1) {
+                       /* set MCBSP1_CLKS, on McBSP1 ON */
+                       val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+                       val |= 1 << 2;
+                       __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+               } else if (id == DSP_CLK_MCBSP2) {
+                       /* set MCBSP2_CLKS, on McBSP2 ON */
+                       val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+                       val |= 1 << 6;
+                       __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+               }
+       } else {
+               if (id == DSP_CLK_MCBSP1) {
+                       /* clear MCBSP1_CLKS, on McBSP1 OFF */
+                       val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+                       val &= ~(1 << 2);
+                       __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+               } else if (id == DSP_CLK_MCBSP2) {
+                       /* clear MCBSP2_CLKS, on McBSP2 OFF */
+                       val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
+                       val &= ~(1 << 6);
+                       __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
+               }
+       }
+}
+#endif
+
+/**
+ * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
+ * @clk_id:      GP Timer clock id.
+ * @load:        Overflow value.
+ *
+ * Sets an overflow interrupt for the desired GPT waiting for a timeout
+ * of 5 msecs for the interrupt to occur.
+ */
+void dsp_gpt_wait_overflow(short int clk_id, unsigned int load)
+{
+       struct omap_dm_timer *gpt = timer[clk_id - 1];
+       unsigned long timeout;
+
+       if (!gpt)
+               return;
+
+       /* Enable overflow interrupt */
+       omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
+
+       /*
+        * Set counter value to overflow counter after
+        * one tick and start timer.
+        */
+       omap_dm_timer_set_load_start(gpt, 0, load);
+
+       /* Wait 80us for timer to overflow */
+       udelay(80);
+
+       timeout = jiffies + msecs_to_jiffies(5);
+       /* Check interrupt status and wait for interrupt */
+       while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) {
+               if (time_is_before_jiffies(timeout)) {
+                       pr_err("%s: GPTimer interrupt failed\n", __func__);
+                       break;
+               }
+       }
+}
+
+/*
+ *  ======== dsp_clk_enable ========
+ *  Purpose:
+ *      Enable the specified clock.
+ *
+ */
+int dsp_clk_enable(IN enum dsp_clk_id clk_id)
+{
+       int status = 0;
+
+       if (is_dsp_clk_active(dsp_clocks, clk_id)) {
+               dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id);
+               goto out;
+       }
+
+       switch (get_clk_type(clk_id)) {
+       case IVA2_CLK:
+               clk_enable(iva2_clk);
+               break;
+       case GPT_CLK:
+               timer[clk_id - 1] =
+                               omap_dm_timer_request_specific(DMT_ID(clk_id));
+               break;
+#ifdef CONFIG_OMAP_MCBSP
+       case MCBSP_CLK:
+               mcbsp_clk_prepare(true, clk_id);
+               omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
+               omap_mcbsp_request(MCBSP_ID(clk_id));
+               break;
+#endif
+       case WDT_CLK:
+               dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n");
+               break;
+       case SSI_CLK:
+               clk_enable(ssi.sst_fck);
+               clk_enable(ssi.ssr_fck);
+               clk_enable(ssi.ick);
+
+               /*
+                * The SSI module must be configured so that its master
+                * interface is not forced idle. If forced idle is set,
+                * the SSI module transitions to standby, which makes the
+                * client on the DSP side hang while waiting for the SSI
+                * module to become active after the clocks are enabled.
+                */
+               ssi_clk_prepare(true);
+               break;
+       default:
+               dev_err(bridge, "Invalid clock id for enable\n");
+               status = -EPERM;
+       }
+
+       if (DSP_SUCCEEDED(status))
+               set_dsp_clk_active(&dsp_clocks, clk_id);
+
+out:
+       return status;
+}
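+
+/*
+ * Usage sketch (illustrative): a balanced enable/disable pair for the
+ * SSI clocks.
+ *
+ *   if (!dsp_clk_enable(DSP_CLK_SSI)) {
+ *           ... use the SSI interface ...
+ *           dsp_clk_disable(DSP_CLK_SSI);
+ *   }
+ */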
+
+/**
+ * dsp_clock_enable_all - Enable clocks used by the DSP
+ * @dsp_per_clocks:    Bitmask of peripheral clocks requested by the DSP
+ *
+ * This function enables all the peripheral clocks that were requested by DSP.
+ */
+u32 dsp_clock_enable_all(u32 dsp_per_clocks)
+{
+       u32 clk_id;
+       u32 status = -EPERM;
+
+       for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+               if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+                       status = dsp_clk_enable(clk_id);
+       }
+
+       return status;
+}
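+
+/*
+ * Note: status is overwritten on each iteration, so the value returned
+ * here (and by dsp_clock_disable_all() below) reflects only the last
+ * clock attempted, or -EPERM if no bit in the mask was set.
+ */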
+
+/*
+ *  ======== dsp_clk_disable ========
+ *  Purpose:
+ *      Disable the clock.
+ *
+ */
+int dsp_clk_disable(IN enum dsp_clk_id clk_id)
+{
+       int status = 0;
+
+       if (!is_dsp_clk_active(dsp_clocks, clk_id)) {
+               dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id);
+               goto out;
+       }
+
+       switch (get_clk_type(clk_id)) {
+       case IVA2_CLK:
+               clk_disable(iva2_clk);
+               break;
+       case GPT_CLK:
+               omap_dm_timer_free(timer[clk_id - 1]);
+               break;
+#ifdef CONFIG_OMAP_MCBSP
+       case MCBSP_CLK:
+               mcbsp_clk_prepare(false, clk_id);
+               omap_mcbsp_free(MCBSP_ID(clk_id));
+               break;
+#endif
+       case WDT_CLK:
+               dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n");
+               break;
+       case SSI_CLK:
+               ssi_clk_prepare(false);
+               clk_disable(ssi.sst_fck);
+               clk_disable(ssi.ssr_fck);
+               clk_disable(ssi.ick);
+               break;
+       default:
+               dev_err(bridge, "Invalid clock id for disable\n");
+               status = -EPERM;
+       }
+
+       if (DSP_SUCCEEDED(status))
+               set_dsp_clk_inactive(&dsp_clocks, clk_id);
+
+out:
+       return status;
+}
+
+/**
+ * dsp_clock_disable_all - Disable all active clocks
+ * @dsp_per_clocks:    Bitmask of active DSP peripheral clocks
+ *
+ * This function disables all the peripheral clocks that were enabled by DSP.
+ * It is meant to be called only when DSP is entering hibernation or when DSP
+ * is in error state.
+ */
+u32 dsp_clock_disable_all(u32 dsp_per_clocks)
+{
+       u32 clk_id;
+       u32 status = -EPERM;
+
+       for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) {
+               if (is_dsp_clk_active(dsp_per_clocks, clk_id))
+                       status = dsp_clk_disable(clk_id);
+       }
+
+       return status;
+}
+
+u32 dsp_clk_get_iva2_rate(void)
+{
+       u32 clk_speed_khz;
+
+       clk_speed_khz = clk_get_rate(iva2_clk);
+       clk_speed_khz /= 1000;
+       dev_dbg(bridge, "%s: clk speed kHz = %u\n", __func__, clk_speed_khz);
+
+       return clk_speed_khz;
+}
+
+void ssi_clk_prepare(bool flag)
+{
+       void __iomem *ssi_base;
+       unsigned int value;
+
+       ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE);
+       if (!ssi_base) {
+               pr_err("%s: error, SSI not configured\n", __func__);
+               return;
+       }
+
+       if (flag) {
+               /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to
+                * no idle
+                */
+               value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE;
+       } else {
+               /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to
+                * forced idle
+                */
+               value = SSI_AUTOIDLE;
+       }
+
+       __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET);
+       iounmap(ssi_base);
+}
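+
+/*
+ * With the bit definitions above, the "active" SYSCONFIG value works
+ * out to SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE ==
+ * 0x1 | 0x10 | 0x1000 == 0x1011, while the idle value is just
+ * SSI_AUTOIDLE == 0x1.
+ */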
+
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
new file mode 100644 (file)
index 0000000..7fb840d
--- /dev/null
@@ -0,0 +1,2410 @@
+/*
+ * io_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO dispatcher for a shared memory channel driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Channel Invariant:
+ * There is an important invariant condition which must be maintained per
+ * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
+ * which may cause timeouts and/or failure of the sync_wait_on_event
+ * function.
+ */
+
+/* Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/workqueue.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/* Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/* Services Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/sync.h>
+
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/* Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspioctl.h>
+#include <dspbridge/wdt.h>
+#include <_tiomap.h>
+#include <tiomap_io.h>
+#include <_tiomap_pwr.h>
+
+/* Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/node.h>
+#include <dspbridge/dev.h>
+
+/* Others */
+#include <dspbridge/rms_sh.h>
+#include <dspbridge/mgr.h>
+#include <dspbridge/drv.h>
+#include "_cmm.h"
+#include "module_list.h"
+
+/* This */
+#include <dspbridge/io_sm.h>
+#include "_msg_sm.h"
+
+/* Defines, Data Structures, Typedefs */
+#define OUTPUTNOTREADY  0xffff
+#define NOTENABLED      0xffff /* Channel(s) not enabled */
+
+#define EXTEND      "_EXT_END"
+
+#define SWAP_WORD(x)     (x)
+#define UL_PAGE_ALIGN_SIZE 0x10000     /* Page Align Size */
+
+#define MAX_PM_REQS 32
+
+#define MMU_FAULT_HEAD1 0xa5a5a5a5
+#define MMU_FAULT_HEAD2 0x96969696
+#define POLL_MAX 1000
+#define MAX_MMU_DBGBUFF 10240
+
+/* IO Manager: only one created per board */
+struct io_mgr {
+       /* These fields must be the first fields in an io_mgr struct */
+       /* Bridge device context */
+       struct bridge_dev_context *hbridge_context;
+       /* Function interface to Bridge driver */
+       struct bridge_drv_interface *intf_fxns;
+       struct dev_object *hdev_obj;    /* Device this board represents */
+
+       /* These fields initialized in bridge_io_create() */
+       struct chnl_mgr *hchnl_mgr;
+       struct shm *shared_mem; /* Shared Memory control */
+       u8 *input;              /* Address of input channel */
+       u8 *output;             /* Address of output channel */
+       struct msg_mgr *hmsg_mgr;       /* Message manager */
+       /* Msg control for messages coming from the DSP */
+       struct msg_ctrl *msg_input_ctrl;
+       /* Msg control for messages going to the DSP */
+       struct msg_ctrl *msg_output_ctrl;
+       u8 *msg_input;          /* Address of input messages */
+       u8 *msg_output;         /* Address of output messages */
+       u32 usm_buf_size;       /* Size of a shared memory I/O channel */
+       bool shared_irq;        /* Is this IRQ shared? */
+       u32 word_size;          /* Size in bytes of DSP word */
+       u16 intr_val;           /* Interrupt value */
+       /* Private extended processor info; MMU setup */
+       struct mgr_processorextinfo ext_proc_info;
+       struct cmm_object *hcmm_mgr;    /* Shared Mem Mngr */
+       struct work_struct io_workq;    /* workqueue */
+#ifndef DSP_TRACEBUF_DISABLED
+       u32 ul_trace_buffer_begin;      /* Trace message start address */
+       u32 ul_trace_buffer_end;        /* Trace message end address */
+       u32 ul_trace_buffer_current;    /* Trace message current address */
+       u32 ul_gpp_read_pointer;        /* GPP Read pointer to Trace buffer */
+       u8 *pmsg;
+       u32 ul_gpp_va;
+       u32 ul_dsp_va;
+#endif
+       /* IO Dpc */
+       u32 dpc_req;            /* Number of requested DPCs. */
+       u32 dpc_sched;          /* Number of executed DPCs. */
+       struct tasklet_struct dpc_tasklet;
+       spinlock_t dpc_lock;
+
+};
+
+/* Function Prototypes */
+static void io_dispatch_chnl(IN struct io_mgr *pio_mgr,
+                               IN OUT struct chnl_object *pchnl, u8 iMode);
+static void io_dispatch_msg(IN struct io_mgr *pio_mgr,
+                           struct msg_mgr *hmsg_mgr);
+static void io_dispatch_pm(struct io_mgr *pio_mgr);
+static void notify_chnl_complete(struct chnl_object *pchnl,
+                                struct chnl_irp *chnl_packet_obj);
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+                       u8 iMode);
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+                       u8 iMode);
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+                            struct chnl_object *pchnl, u32 dwMask);
+static u32 read_data(struct bridge_dev_context *hDevContext, void *dest,
+                    void *pSrc, u32 usize);
+static u32 write_data(struct bridge_dev_context *hDevContext, void *dest,
+                     void *pSrc, u32 usize);
+
+/* Bus Addr (cached kernel) */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+                                   struct cod_manager *cod_man,
+                                   u32 dw_gpp_base_pa);
+
+/*
+ *  ======== bridge_io_create ========
+ *      Create an IO manager object.
+ */
+int bridge_io_create(OUT struct io_mgr **phIOMgr,
+                           struct dev_object *hdev_obj,
+                           IN CONST struct io_attrs *pMgrAttrs)
+{
+       int status = 0;
+       struct io_mgr *pio_mgr = NULL;
+       struct shm *shared_mem = NULL;
+       struct bridge_dev_context *hbridge_context = NULL;
+       struct cfg_devnode *dev_node_obj;
+       struct chnl_mgr *hchnl_mgr;
+       u8 dev_type;
+
+       /* Check requirements */
+       if (!phIOMgr || !pMgrAttrs || pMgrAttrs->word_size == 0) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
+       if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       /*
+        * Message manager will be created when a file is loaded, since
+        * size of message buffer in shared memory is configurable in
+        * the base image.
+        */
+       dev_get_bridge_context(hdev_obj, &hbridge_context);
+       if (!hbridge_context) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       dev_get_dev_type(hdev_obj, &dev_type);
+       /*
+        * DSP shared memory area will get set properly when
+        * a program is loaded. They are unknown until a COFF file is
+        * loaded. I chose the value -1 because it was less likely to be
+        * a valid address than 0.
+        */
+       shared_mem = (struct shm *)-1;
+
+       /* Allocate IO manager object */
+       pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
+       if (pio_mgr == NULL) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+
+       /* Initialize chnl_mgr object */
+#ifndef DSP_TRACEBUF_DISABLED
+       pio_mgr->pmsg = NULL;
+#endif
+       pio_mgr->hchnl_mgr = hchnl_mgr;
+       pio_mgr->word_size = pMgrAttrs->word_size;
+       pio_mgr->shared_mem = shared_mem;
+
+       if (dev_type == DSP_UNIT) {
+               /* Create an IO DPC */
+               tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
+
+               /* Initialize DPC counters */
+               pio_mgr->dpc_req = 0;
+               pio_mgr->dpc_sched = 0;
+
+               spin_lock_init(&pio_mgr->dpc_lock);
+
+               if (DSP_SUCCEEDED(status))
+                       status = dev_get_dev_node(hdev_obj, &dev_node_obj);
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               pio_mgr->hbridge_context = hbridge_context;
+               pio_mgr->shared_irq = pMgrAttrs->irq_shared;
+               if (dsp_wdt_init())
+                       status = -EPERM;
+       } else {
+               status = -EIO;
+       }
+func_end:
+       if (DSP_FAILED(status)) {
+               /* Cleanup */
+               bridge_io_destroy(pio_mgr);
+               if (phIOMgr)
+                       *phIOMgr = NULL;
+       } else {
+               /* Return IO manager object to caller... */
+               hchnl_mgr->hio_mgr = pio_mgr;
+               *phIOMgr = pio_mgr;
+       }
+       return status;
+}
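+
+/*
+ * Creation sketch (illustrative; the attrs values shown are assumptions,
+ * only fields read by bridge_io_create() are filled in):
+ *
+ *   struct io_attrs attrs = { .word_size = 2, .irq_shared = false };
+ *   struct io_mgr *iomgr;
+ *   int err = bridge_io_create(&iomgr, hdev_obj, &attrs);
+ */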
+
+/*
+ *  ======== bridge_io_destroy ========
+ *  Purpose:
+ *      Disable interrupts, destroy the IO manager.
+ */
+int bridge_io_destroy(struct io_mgr *hio_mgr)
+{
+       int status = 0;
+       if (hio_mgr) {
+               /* Free IO DPC object */
+               tasklet_kill(&hio_mgr->dpc_tasklet);
+
+#ifndef DSP_TRACEBUF_DISABLED
+               kfree(hio_mgr->pmsg);
+#endif
+               dsp_wdt_exit();
+               /* Free this IO manager object */
+               kfree(hio_mgr);
+       } else {
+               status = -EFAULT;
+       }
+
+       return status;
+}
+
+/*
+ *  ======== bridge_io_on_loaded ========
+ *  Purpose:
+ *      Called when a new program is loaded to get shared memory buffer
+ *      parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
+ *      are in DSP address units.
+ */
+int bridge_io_on_loaded(struct io_mgr *hio_mgr)
+{
+       struct cod_manager *cod_man;
+       struct chnl_mgr *hchnl_mgr;
+       struct msg_mgr *hmsg_mgr;
+       u32 ul_shm_base;
+       u32 ul_shm_base_offset;
+       u32 ul_shm_limit;
+       u32 ul_shm_length = -1;
+       u32 ul_mem_length = -1;
+       u32 ul_msg_base;
+       u32 ul_msg_limit;
+       u32 ul_msg_length = -1;
+       u32 ul_ext_end;
+       u32 ul_gpp_pa = 0;
+       u32 ul_gpp_va = 0;
+       u32 ul_dsp_va = 0;
+       u32 ul_seg_size = 0;
+       u32 ul_pad_size = 0;
+       u32 i;
+       int status = 0;
+       u8 num_procs = 0;
+       s32 ndx = 0;
+       /* DSP MMU setup table */
+       struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
+       struct cfg_hostres *host_res;
+       struct bridge_dev_context *pbridge_context;
+       u32 map_attrs;
+       u32 shm0_end;
+       u32 ul_dyn_ext_base;
+       u32 ul_seg1_size = 0;
+       u32 pa_curr = 0;
+       u32 va_curr = 0;
+       u32 gpp_va_curr = 0;
+       u32 num_bytes = 0;
+       u32 all_bits = 0;
+       u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+               HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+       };
+
+       status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
+       if (!pbridge_context) {
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       host_res = pbridge_context->resources;
+       if (!host_res) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
+       if (!cod_man) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       hchnl_mgr = hio_mgr->hchnl_mgr;
+       /* The message manager is destroyed when the board is stopped. */
+       dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
+       hmsg_mgr = hio_mgr->hmsg_mgr;
+       if (!hchnl_mgr || !hmsg_mgr) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       if (hio_mgr->shared_mem)
+               hio_mgr->shared_mem = NULL;
+
+       /* Get start and length of channel part of shared memory */
+       status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
+                                  &ul_shm_base);
+       if (DSP_FAILED(status)) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
+                                  &ul_shm_limit);
+       if (DSP_FAILED(status)) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       if (ul_shm_limit <= ul_shm_base) {
+               status = -EINVAL;
+               goto func_end;
+       }
+       /* Get total length in bytes */
+       ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
+       /* Calculate size of a PROCCOPY shared memory region */
+       dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
+               __func__, (ul_shm_length - sizeof(struct shm)));
+
+       if (DSP_SUCCEEDED(status)) {
+               /* Get start and length of message part of shared memory */
+               status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+                                          &ul_msg_base);
+       }
+       if (DSP_SUCCEEDED(status)) {
+               status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
+                                          &ul_msg_limit);
+               if (DSP_SUCCEEDED(status)) {
+                       if (ul_msg_limit <= ul_msg_base) {
+                               status = -EINVAL;
+                       } else {
+                               /*
+                                * Length (bytes) of messaging part of shared
+                                * memory.
+                                */
+                               ul_msg_length =
+                                   (ul_msg_limit - ul_msg_base +
+                                    1) * hio_mgr->word_size;
+                               /*
+                                * Total length (bytes) of shared memory:
+                                * chnl + msg.
+                                */
+                               ul_mem_length = ul_shm_length + ul_msg_length;
+                       }
+               } else {
+                       status = -EFAULT;
+               }
+       } else {
+               status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status)) {
+#ifndef DSP_TRACEBUF_DISABLED
+               status =
+                   cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
+#else
+               status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+                                          &shm0_end);
+#endif
+               if (DSP_FAILED(status))
+                       status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status)) {
+               status =
+                   cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
+               if (DSP_FAILED(status))
+                       status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status)) {
+               status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
+               if (DSP_FAILED(status))
+                       status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status)) {
+               /* Get memory reserved in host resources */
+               (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
+                                             &hio_mgr->ext_proc_info,
+                                             sizeof(struct
+                                                    mgr_processorextinfo),
+                                             &num_procs);
+
+               /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
+               ndx = 0;
+               ul_gpp_pa = host_res->dw_mem_phys[1];
+               ul_gpp_va = host_res->dw_mem_base[1];
+               /* This is the virtual uncached ioremapped address!!! */
+               /* Why can't we directly take the DSPVA from the symbols? */
+               ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
+               ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
+               ul_seg1_size =
+                   (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
+               /* 4K align */
+               ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
+               /* 64K align */
+               ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
+               ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
+                                                   UL_PAGE_ALIGN_SIZE);
+               if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
+                       ul_pad_size = 0x0;
+
+               dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
+                       "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
+                       "ul_seg_size %x ul_seg1_size %x \n", __func__,
+                       ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
+                       ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
+
+               if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
+                   host_res->dw_mem_length[1]) {
+                       pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
+                              __func__, host_res->dw_mem_length[1],
+                              ul_seg_size + ul_seg1_size + ul_pad_size);
+                       status = -ENOMEM;
+               }
+       }
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       pa_curr = ul_gpp_pa;
+       va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+       gpp_va_curr = ul_gpp_va;
+       num_bytes = ul_seg1_size;
+
+       /*
+        * Try to fit into TLB entries. If not possible, push them to page
+        * tables. If the sections do not start on a larger page boundary,
+        * we may end up producing several small pages, which are better
+        * kept in the page tables.
+        */
+       map_attrs = 0x00000000;
+       map_attrs = DSP_MAPLITTLEENDIAN;
+       map_attrs |= DSP_MAPPHYSICALADDR;
+       map_attrs |= DSP_MAPELEMSIZE32;
+       map_attrs |= DSP_MAPDONOTLOCK;
+
+       while (num_bytes) {
+               /*
+                * To find the max. page size with which both PA & VA are
+                * aligned.
+                */
+               all_bits = pa_curr | va_curr;
+               dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+                       "num_bytes %x\n", all_bits, pa_curr, va_curr,
+                       num_bytes);
+               for (i = 0; i < 4; i++) {
+                       if ((num_bytes >= page_size[i]) && ((all_bits &
+                                                            (page_size[i] -
+                                                             1)) == 0)) {
+                               status =
+                                   hio_mgr->intf_fxns->
+                                   pfn_brd_mem_map(hio_mgr->hbridge_context,
+                                                   pa_curr, va_curr,
+                                                   page_size[i], map_attrs,
+                                                   NULL);
+                               if (DSP_FAILED(status))
+                                       goto func_end;
+                               pa_curr += page_size[i];
+                               va_curr += page_size[i];
+                               gpp_va_curr += page_size[i];
+                               num_bytes -= page_size[i];
+                               /*
+                                * Don't try smaller sizes. Hopefully we have
+                                * reached an address aligned to a bigger page
+                                * size.
+                                */
+                               break;
+                       }
+               }
+       }
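+       /*
+        * Worked example of the loop above: with pa_curr == 0x87000000
+        * and va_curr == 0x20000000, all_bits == 0xa7000000 has its low
+        * 24 bits clear, so a 16 MB mapping is used while num_bytes is
+        * at least 16 MB; misaligned or short tails fall through to
+        * 1 MB, 64 KB and finally 4 KB mappings.
+        */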
+       pa_curr += ul_pad_size;
+       va_curr += ul_pad_size;
+       gpp_va_curr += ul_pad_size;
+
+       /* Configure the TLB entries for the next cacheable segment */
+       num_bytes = ul_seg_size;
+       va_curr = ul_dsp_va * hio_mgr->word_size;
+       while (num_bytes) {
+               /*
+                * To find the max. page size with which both PA & VA are
+                * aligned.
+                */
+               all_bits = pa_curr | va_curr;
+               dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+                       "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+                       va_curr, num_bytes);
+               for (i = 0; i < 4; i++) {
+                       if (!(num_bytes >= page_size[i]) ||
+                           !((all_bits & (page_size[i] - 1)) == 0))
+                               continue;
+                       if (ndx < MAX_LOCK_TLB_ENTRIES) {
+                               /*
+                                * This is the physical address written to
+                                * DSP MMU.
+                                */
+                               ae_proc[ndx].ul_gpp_pa = pa_curr;
+                               /*
+                                * This is the virtual uncached ioremapped
+                                * address!!!
+                                */
+                               ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+                               ae_proc[ndx].ul_dsp_va =
+                                   va_curr / hio_mgr->word_size;
+                               ae_proc[ndx].ul_size = page_size[i];
+                               ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+                               ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+                               ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+                               dev_dbg(bridge, "shm MMU TLB entry PA %x"
+                                       " VA %x DSP_VA %x Size %x\n",
+                                       ae_proc[ndx].ul_gpp_pa,
+                                       ae_proc[ndx].ul_gpp_va,
+                                       ae_proc[ndx].ul_dsp_va *
+                                       hio_mgr->word_size, page_size[i]);
+                               ndx++;
+                       } else {
+                               status =
+                                   hio_mgr->intf_fxns->
+                                   pfn_brd_mem_map(hio_mgr->hbridge_context,
+                                                   pa_curr, va_curr,
+                                                   page_size[i], map_attrs,
+                                                   NULL);
+                               dev_dbg(bridge,
+                                       "shm MMU PTE entry PA %x"
+                                       " VA %x DSP_VA %x Size %x\n",
+                                       pa_curr, gpp_va_curr, va_curr,
+                                       page_size[i]);
+                               if (DSP_FAILED(status))
+                                       goto func_end;
+                       }
+                       pa_curr += page_size[i];
+                       va_curr += page_size[i];
+                       gpp_va_curr += page_size[i];
+                       num_bytes -= page_size[i];
+                       /*
+                        * Don't try smaller sizes. Hopefully we have reached
+                        * an address aligned to a bigger page size.
+                        */
+                       break;
+               }
+       }
+
+       /*
+        * Copy remaining entries from CDB. All entries are 1 MB and
+        * should not conflict with shm entries on MPU or DSP side.
+        */
+       for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
+               if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
+                       continue;
+
+               if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
+                    ul_gpp_pa - 0x100000
+                    && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
+                    ul_gpp_pa + ul_seg_size)
+                   || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
+                       ul_dsp_va - 0x100000 / hio_mgr->word_size
+                       && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
+                       ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
+                       dev_dbg(bridge,
+                               "CDB MMU entry %d conflicts with "
+                               "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
+                               "GppPa %x, DspVa %x, Bytes %x.\n", i,
+                               hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
+                               hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
+                               ul_gpp_pa, ul_dsp_va, ul_seg_size);
+                       status = -EPERM;
+               } else {
+                       if (ndx < MAX_LOCK_TLB_ENTRIES) {
+                               ae_proc[ndx].ul_dsp_va =
+                                   hio_mgr->ext_proc_info.ty_tlb[i].
+                                   ul_dsp_virt;
+                               ae_proc[ndx].ul_gpp_pa =
+                                   hio_mgr->ext_proc_info.ty_tlb[i].
+                                   ul_gpp_phys;
+                               ae_proc[ndx].ul_gpp_va = 0;
+                               /* 1 MB */
+                               ae_proc[ndx].ul_size = 0x100000;
+                               dev_dbg(bridge, "shm MMU entry PA %x "
+                                       "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
+                                       ae_proc[ndx].ul_dsp_va);
+                               ndx++;
+                       } else {
+                               status = hio_mgr->intf_fxns->pfn_brd_mem_map
+                                   (hio_mgr->hbridge_context,
+                                    hio_mgr->ext_proc_info.ty_tlb[i].
+                                    ul_gpp_phys,
+                                    hio_mgr->ext_proc_info.ty_tlb[i].
+                                    ul_dsp_virt, 0x100000, map_attrs,
+                                    NULL);
+                       }
+               }
+               if (DSP_FAILED(status))
+                       goto func_end;
+       }
+
+       map_attrs = 0x00000000;
+       map_attrs = DSP_MAPLITTLEENDIAN;
+       map_attrs |= DSP_MAPPHYSICALADDR;
+       map_attrs |= DSP_MAPELEMSIZE32;
+       map_attrs |= DSP_MAPDONOTLOCK;
+
+       /* Map the L4 peripherals */
+       i = 0;
+       while (l4_peripheral_table[i].phys_addr) {
+               status = hio_mgr->intf_fxns->pfn_brd_mem_map
+                   (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
+                    l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
+                    map_attrs, NULL);
+               if (DSP_FAILED(status))
+                       goto func_end;
+               i++;
+       }
+
+       for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
+               ae_proc[i].ul_dsp_va = 0;
+               ae_proc[i].ul_gpp_pa = 0;
+               ae_proc[i].ul_gpp_va = 0;
+               ae_proc[i].ul_size = 0;
+       }
+       /*
+        * Set the shm physical address entry (grayed out in CDB file)
+        * to the virtual uncached ioremapped address of shm reserved
+        * on MPU.
+        */
+       hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
+           (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+       /*
+        * Need shm Phys addr. IO supports only one DSP for now:
+        * num_procs = 1.
+        */
+       if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
+               status = -EFAULT;
+               goto func_end;
+       } else {
+               if (ae_proc[0].ul_dsp_va > ul_shm_base) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+               /* ul_shm_base may not be at ul_dsp_va address */
+               ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
+                   hio_mgr->word_size;
+               /*
+                * bridge_dev_ctrl() will set dev context dsp-mmu info. In
+                * bridge_brd_start() the MMU will be re-programed with MMU
+                * DSPVa-GPPPa pair info while DSP is in a known
+                * (reset) state.
+                */
+
+               status =
+                   hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
+                                                     BRDIOCTL_SETMMUCONFIG,
+                                                     ae_proc);
+               if (DSP_FAILED(status))
+                       goto func_end;
+               ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+               ul_shm_base += ul_shm_base_offset;
+               ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
+                                                      ul_mem_length);
+               if (ul_shm_base == 0) {
+                       status = -EFAULT;
+                       goto func_end;
+               }
+               /* Register SM */
+               status =
+                   register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
+       }
+
+       hio_mgr->shared_mem = (struct shm *)ul_shm_base;
+       hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
+       hio_mgr->output = hio_mgr->input + (ul_shm_length -
+                                           sizeof(struct shm)) / 2;
+       hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
+
+       /*  Set up Shared memory addresses for messaging. */
+       hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
+                                                     + ul_shm_length);
+       hio_mgr->msg_input =
+           (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
+       hio_mgr->msg_output_ctrl =
+           (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
+                               ul_msg_length / 2);
+       hio_mgr->msg_output =
+           (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
+       hmsg_mgr->max_msgs =
+           ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
+           / sizeof(struct msg_dspmsg);
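+       /*
+        * Resulting shared memory layout, in increasing address order:
+        *
+        *   shm struct | input | output |
+        *   msg_input_ctrl | msg_input | msg_output_ctrl | msg_output
+        *
+        * input and output split the channel area left after the shm
+        * struct evenly, and the message area is divided the same way.
+        */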
+       dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
+               "output %p, msg_input_ctrl %p, msg_input %p, "
+               "msg_output_ctrl %p, msg_output %p\n",
+               (u8 *) hio_mgr->shared_mem, hio_mgr->input,
+               hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
+               hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
+               hio_mgr->msg_output);
+       dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
+               hmsg_mgr->max_msgs);
+       memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
+
+#ifndef DSP_TRACEBUF_DISABLED
+       /* Get the start address of trace buffer */
+       status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
+                                  &hio_mgr->ul_trace_buffer_begin);
+       if (DSP_FAILED(status)) {
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+           (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+           (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
+       /* Get the end address of trace buffer */
+       status = cod_get_sym_value(cod_man, SYS_PUTCEND,
+                                  &hio_mgr->ul_trace_buffer_end);
+       if (DSP_FAILED(status)) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       hio_mgr->ul_trace_buffer_end =
+           (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+           (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
+       /* Get the current address of DSP write pointer */
+       status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
+                                  &hio_mgr->ul_trace_buffer_current);
+       if (DSP_FAILED(status)) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       hio_mgr->ul_trace_buffer_current =
+           (ul_gpp_va + ul_seg1_size + ul_pad_size) +
+           (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
+       /* Calculate the size of trace buffer */
+       kfree(hio_mgr->pmsg);
+       hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
+                               hio_mgr->ul_trace_buffer_begin) *
+                               hio_mgr->word_size) + 2, GFP_KERNEL);
+       if (!hio_mgr->pmsg)
+               status = -ENOMEM;
+
+       hio_mgr->ul_dsp_va = ul_dsp_va;
+       hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
+
+#endif
+func_end:
+       return status;
+}
+
+/*
+ *  ======== io_buf_size ========
+ *      Size of shared memory I/O channel.
+ */
+u32 io_buf_size(struct io_mgr *hio_mgr)
+{
+       if (hio_mgr)
+               return hio_mgr->usm_buf_size;
+       else
+               return 0;
+}
+
+/*
+ *  ======== io_cancel_chnl ========
+ *      Cancel IO on a given PCPY channel.
+ */
+void io_cancel_chnl(struct io_mgr *hio_mgr, u32 ulChnl)
+{
+       struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
+       struct shm *sm;
+
+       if (!hio_mgr)
+               goto func_end;
+       sm = hio_mgr->shared_mem;
+
+       /* Inform DSP that we have no more buffers on this channel */
+       IO_AND_VALUE(pio_mgr->hbridge_context, struct shm, sm, host_free_mask,
+                    (~(1 << ulChnl)));
+
+       sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+func_end:
+       return;
+}
+
+/*
+ *  ======== io_dispatch_chnl ========
+ *      Proc-copy channel dispatch.
+ */
+static void io_dispatch_chnl(IN struct io_mgr *pio_mgr,
+                               IN OUT struct chnl_object *pchnl, u8 iMode)
+{
+       if (!pio_mgr)
+               goto func_end;
+
+       /* See if there is any data available for transfer */
+       if (iMode != IO_SERVICE)
+               goto func_end;
+
+       /* Any channel will do for this mode */
+       input_chnl(pio_mgr, pchnl, iMode);
+       output_chnl(pio_mgr, pchnl, iMode);
+func_end:
+       return;
+}
+
+/*
+ *  ======== io_dispatch_msg ========
+ *      Performs I/O dispatch on message queues.
+ */
+static void io_dispatch_msg(IN struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+       if (!pio_mgr)
+               goto func_end;
+
+       /* We are performing both input and output processing. */
+       input_msg(pio_mgr, hmsg_mgr);
+       output_msg(pio_mgr, hmsg_mgr);
+func_end:
+       return;
+}
+
+/*
+ *  ======== io_dispatch_pm ========
+ *      Performs I/O dispatch on PM related messages from DSP
+ */
+static void io_dispatch_pm(struct io_mgr *pio_mgr)
+{
+       int status;
+       u32 parg[2];
+
+       /* Perform Power message processing here */
+       parg[0] = pio_mgr->intr_val;
+
+       /* Send the command to the Bridge clk/pwr manager to handle */
+       if (parg[0] == MBX_PM_HIBERNATE_EN) {
+               dev_dbg(bridge, "PM: Hibernate command\n");
+               status = pio_mgr->intf_fxns->
+                               pfn_dev_cntrl(pio_mgr->hbridge_context,
+                                             BRDIOCTL_PWR_HIBERNATE, parg);
+               if (DSP_FAILED(status))
+                       pr_err("%s: hibernate cmd failed 0x%x\n",
+                                      __func__, status);
+       } else if (parg[0] == MBX_PM_OPP_REQ) {
+               parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
+               dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
+               status = pio_mgr->intf_fxns->
+                               pfn_dev_cntrl(pio_mgr->hbridge_context,
+                                       BRDIOCTL_CONSTRAINT_REQUEST, parg);
+               if (DSP_FAILED(status))
+                       dev_dbg(bridge, "PM: Failed to set constraint "
+                               "= 0x%x \n", parg[1]);
+       } else {
+               dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
+                       parg[0]);
+               status = pio_mgr->intf_fxns->
+                               pfn_dev_cntrl(pio_mgr->hbridge_context,
+                                             BRDIOCTL_CLK_CTRL, parg);
+               if (DSP_FAILED(status))
+                       dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
+                               "= 0x%x\n", *parg);
+       }
+}
+
+/*
+ *  ======== io_dpc ========
+ *      Deferred procedure call for shared memory channel driver ISR.  Carries
+ *      out the dispatch of I/O as a non-preemptible event.  It can only be
+ *      pre-empted by an ISR.
+ */
+void io_dpc(IN OUT unsigned long pRefData)
+{
+       struct io_mgr *pio_mgr = (struct io_mgr *)pRefData;
+       struct chnl_mgr *chnl_mgr_obj;
+       struct msg_mgr *msg_mgr_obj;
+       struct deh_mgr *hdeh_mgr;
+       u32 requested;
+       u32 serviced;
+
+       if (!pio_mgr)
+               goto func_end;
+       chnl_mgr_obj = pio_mgr->hchnl_mgr;
+       dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
+       dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
+       if (!chnl_mgr_obj)
+               goto func_end;
+
+       requested = pio_mgr->dpc_req;
+       serviced = pio_mgr->dpc_sched;
+
+       if (serviced == requested)
+               goto func_end;
+
+       /* Process pending DPC's */
+       do {
+               /* Check value of interrupt reg to ensure it's a valid error */
+               if ((pio_mgr->intr_val > DEH_BASE) &&
+                   (pio_mgr->intr_val < DEH_LIMIT)) {
+                       /* Notify DSP/BIOS exception */
+                       if (hdeh_mgr) {
+#ifndef DSP_TRACEBUF_DISABLED
+                               print_dsp_debug_trace(pio_mgr);
+#endif
+                               bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
+                                                 pio_mgr->intr_val);
+                       }
+               }
+               io_dispatch_chnl(pio_mgr, NULL, IO_SERVICE);
+#ifdef CHNL_MESSAGES
+               if (msg_mgr_obj)
+                       io_dispatch_msg(pio_mgr, msg_mgr_obj);
+#endif
+#ifndef DSP_TRACEBUF_DISABLED
+               if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
+                       /* Notify DSP Trace message */
+                       print_dsp_debug_trace(pio_mgr);
+               }
+#endif
+               serviced++;
+       } while (serviced != requested);
+       pio_mgr->dpc_sched = requested;
+func_end:
+       return;
+}
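+
+/*
+ * Illustrative only -- the dpc_req/dpc_sched pair above is a classic
+ * lost-wakeup guard: the interrupt side counts requests, the DPC side
+ * replays the difference.  A standalone userspace sketch of the same
+ * pattern (hypothetical names, not part of this driver):
+ */
+#include <stdio.h>
+
+static unsigned int dpc_req;           /* bumped by the "ISR" side */
+static unsigned int dpc_sched;         /* consumed by the "DPC" side */
+
+static void fake_isr(void)
+{
+        dpc_req++;                     /* in the driver: under dpc_lock */
+}
+
+static void fake_dpc(void)
+{
+        unsigned int requested = dpc_req;
+        unsigned int serviced = dpc_sched;
+
+        while (serviced != requested) {
+                /* io_dispatch_chnl()/io_dispatch_msg() would run here */
+                printf("servicing request %u\n", ++serviced);
+        }
+        dpc_sched = requested;
+}
+
+int main(void)
+{
+        fake_isr();
+        fake_isr();
+        fake_dpc();                    /* prints two service passes */
+        return 0;
+}
+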
+
+/*
+ *  ======== io_mbox_msg ========
+ *      Main interrupt handler for the shared memory IO manager.
+ *      Dispatches PM-class messages immediately, then schedules a DPC to
+ *      dispatch channel and message I/O.
+ */
+void io_mbox_msg(u32 msg)
+{
+       struct io_mgr *pio_mgr;
+       struct dev_object *dev_obj;
+       unsigned long flags;
+
+       dev_obj = dev_get_first();
+       dev_get_io_mgr(dev_obj, &pio_mgr);
+
+       if (!pio_mgr)
+               return;
+
+       pio_mgr->intr_val = (u16)msg;
+       if (pio_mgr->intr_val & MBX_PM_CLASS)
+               io_dispatch_pm(pio_mgr);
+
+       if (pio_mgr->intr_val == MBX_DEH_RESET) {
+               pio_mgr->intr_val = 0;
+       } else {
+               spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
+               pio_mgr->dpc_req++;
+               spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
+               tasklet_schedule(&pio_mgr->dpc_tasklet);
+       }
+       return;
+}
+
+/*
+ *  ======== io_request_chnl ========
+ *  Purpose:
+ *      Request channel I/O from the DSP. Sets flags in shared memory, then
+ *      interrupts the DSP.
+ */
+void io_request_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+                       u8 iMode, OUT u16 *pwMbVal)
+{
+       struct chnl_mgr *chnl_mgr_obj;
+       struct shm *sm;
+
+       if (!pchnl || !pwMbVal)
+               goto func_end;
+       chnl_mgr_obj = pio_mgr->hchnl_mgr;
+       sm = pio_mgr->shared_mem;
+       if (iMode == IO_INPUT) {
+               /*
+                * Assertion fires if CHNL_AddIOReq() called on a stream
+                * which was cancelled, or attached to a dead board.
+                */
+               DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
+                          (pchnl->dw_state == CHNL_STATEEOS));
+               /* Indicate to the DSP we have a buffer available for input */
+               IO_OR_VALUE(pio_mgr->hbridge_context, struct shm, sm,
+                           host_free_mask, (1 << pchnl->chnl_id));
+               *pwMbVal = MBX_PCPY_CLASS;
+       } else if (iMode == IO_OUTPUT) {
+               /*
+                * This assertion fails if CHNL_AddIOReq() was called on a
+                * stream which was cancelled, or attached to a dead board.
+                */
+               DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
+                          CHNL_STATEREADY);
+               /*
+                * Record the fact that we have a buffer available for
+                * output.
+                */
+               chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
+       } else {
+               DBC_ASSERT(iMode);      /* Shouldn't get here. */
+       }
+func_end:
+       return;
+}
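+
+/*
+ * Illustrative only -- host_free_mask is a per-channel bitmap shared
+ * with the DSP; io_request_chnl() sets a channel's bit and
+ * io_cancel_chnl() clears it.  A standalone sketch of the set/clear
+ * protocol (channel number hypothetical, not part of this driver):
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned int host_free_mask = 0;
+        unsigned int chnl_id = 3;
+
+        host_free_mask |= (1 << chnl_id);      /* buffer available */
+        printf("after request: %#x\n", host_free_mask); /* 0x8 */
+
+        host_free_mask &= ~(1 << chnl_id);     /* no more buffers */
+        printf("after cancel:  %#x\n", host_free_mask); /* 0 */
+        return 0;
+}
+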
+
+/*
+ *  ======== iosm_schedule ========
+ *      Schedule DPC for IO.
+ */
+void iosm_schedule(struct io_mgr *pio_mgr)
+{
+       unsigned long flags;
+
+       if (!pio_mgr)
+               return;
+
+       /* Increment count of DPC's pending. */
+       spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
+       pio_mgr->dpc_req++;
+       spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
+
+       /* Schedule DPC */
+       tasklet_schedule(&pio_mgr->dpc_tasklet);
+}
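+
+/*
+ * Note: for the tasklet_schedule() calls above to fire io_dpc(), the
+ * IO manager's create path must have initialised the tasklet and the
+ * lock.  A hedged sketch of that setup -- the real code lives outside
+ * this hunk and may differ:
+ *
+ *      tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (unsigned long)pio_mgr);
+ *      spin_lock_init(&pio_mgr->dpc_lock);
+ *      pio_mgr->dpc_req = 0;
+ *      pio_mgr->dpc_sched = 0;
+ */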
+
+/*
+ *  ======== find_ready_output ========
+ *      Search for a host output channel which is ready to send.  If this is
+ *      called as a result of servicing the DPC, then implement a round
+ *      robin search; otherwise, this was called by a client thread (via
+ *      io_dispatch_chnl()), so just start searching from the current channel
+ *      id.
+ */
+static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
+                            struct chnl_object *pchnl, u32 dwMask)
+{
+       u32 ret = OUTPUTNOTREADY;
+       u32 id, start_id;
+       u32 shift;
+
+       id = (pchnl != NULL) ? pchnl->chnl_id :
+            (chnl_mgr_obj->dw_last_output + 1);
+       id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+       if (id >= CHNL_MAXCHANNELS)
+               goto func_end;
+       if (dwMask) {
+               shift = (1 << id);
+               start_id = id;
+               do {
+                       if (dwMask & shift) {
+                               ret = id;
+                               if (pchnl == NULL)
+                                       chnl_mgr_obj->dw_last_output = id;
+                               break;
+                       }
+                       id = id + 1;
+                       id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
+                       shift = (1 << id);
+               } while (id != start_id);
+       }
+func_end:
+       return ret;
+}
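+
+/*
+ * Illustrative only -- a standalone userspace sketch of the
+ * round-robin mask scan above.  MAXCHANNELS, the mask value and the
+ * last-served id are hypothetical, not part of this driver:
+ */
+#include <stdio.h>
+
+#define MAXCHANNELS 16
+#define NOTREADY    0xffffffffU
+
+static unsigned int find_ready(unsigned int mask, unsigned int last)
+{
+        unsigned int id = (last + 1) % MAXCHANNELS;
+        unsigned int start = id;
+
+        do {
+                if (mask & (1u << id))
+                        return id;     /* first ready channel after last */
+                id = (id + 1) % MAXCHANNELS;
+        } while (id != start);
+
+        return NOTREADY;
+}
+
+int main(void)
+{
+        /* channels 2 and 9 ready; last served was 5 -> pick 9 */
+        printf("next = %u\n", find_ready((1u << 2) | (1u << 9), 5));
+        return 0;
+}
+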
+
+/*
+ *  ======== input_chnl ========
+ *      Dispatch a buffer on an input channel.
+ */
+static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+                       u8 iMode)
+{
+       struct chnl_mgr *chnl_mgr_obj;
+       struct shm *sm;
+       u32 chnl_id;
+       u32 bytes;
+       struct chnl_irp *chnl_packet_obj = NULL;
+       u32 dw_arg;
+       bool clear_chnl = false;
+       bool notify_client = false;
+
+       sm = pio_mgr->shared_mem;
+       chnl_mgr_obj = pio_mgr->hchnl_mgr;
+
+       /* Attempt to perform input */
+       if (!IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm, input_full))
+               goto func_end;
+
+       bytes =
+           IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
+                        input_size) * chnl_mgr_obj->word_size;
+       chnl_id = IO_GET_VALUE(pio_mgr->hbridge_context, struct shm,
+                                                       sm, input_id);
+       dw_arg = IO_GET_LONG(pio_mgr->hbridge_context, struct shm, sm, arg);
+       if (chnl_id >= CHNL_MAXCHANNELS) {
+               /* Shouldn't be here: would indicate corrupted shm. */
+               DBC_ASSERT(chnl_id);
+               goto func_end;
+       }
+       pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+       if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
+               if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
+                       if (!pchnl->pio_requests)
+                               goto func_end;
+                       /* Get the I/O request, and attempt a transfer */
+                       chnl_packet_obj = (struct chnl_irp *)
+                           lst_get_head(pchnl->pio_requests);
+                       if (chnl_packet_obj) {
+                               pchnl->cio_reqs--;
+                               if (pchnl->cio_reqs < 0)
+                                       goto func_end;
+                               /*
+                                * Ensure we don't overflow the client's
+                                * buffer.
+                                */
+                               bytes = min(bytes, chnl_packet_obj->byte_size);
+                               /* Transfer buffer from DSP side */
+                               bytes = read_data(pio_mgr->hbridge_context,
+                                                 chnl_packet_obj->host_sys_buf,
+                                                 pio_mgr->input, bytes);
+                               pchnl->bytes_moved += bytes;
+                               chnl_packet_obj->byte_size = bytes;
+                               chnl_packet_obj->dw_arg = dw_arg;
+                               chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
+
+                               if (bytes == 0) {
+                                       /*
+                                        * This assertion fails if the DSP
+                                        * sends EOS more than once on this
+                                        * channel.
+                                        */
+                                       if (pchnl->dw_state & CHNL_STATEEOS)
+                                               goto func_end;
+                                       /*
+                                        * Zero bytes indicates EOS. Update
+                                        * IOC status for this chirp, and also
+                                        * the channel state.
+                                        */
+                                       chnl_packet_obj->status |=
+                                           CHNL_IOCSTATEOS;
+                                       pchnl->dw_state |= CHNL_STATEEOS;
+                                       /*
+                                        * Notify that end of stream has
+                                        * occurred.
+                                        */
+                                       ntfy_notify(pchnl->ntfy_obj,
+                                                   DSP_STREAMDONE);
+                               }
+                               /* Tell DSP if no more I/O buffers available */
+                               if (!pchnl->pio_requests)
+                                       goto func_end;
+                               if (LST_IS_EMPTY(pchnl->pio_requests)) {
+                                       IO_AND_VALUE(pio_mgr->hbridge_context,
+                                                    struct shm, sm,
+                                                    host_free_mask,
+                                                    ~(1 << pchnl->chnl_id));
+                               }
+                               clear_chnl = true;
+                               notify_client = true;
+                       } else {
+                               /*
+                                * Input full for this channel, but we have no
+                                * buffers available.  The channel must be
+                                * "idling". Clear out the physical input
+                                * channel.
+                                */
+                               clear_chnl = true;
+                       }
+               } else {
+                       /* Input channel cancelled: clear input channel */
+                       clear_chnl = true;
+               }
+       } else {
+               /* DPC fired after host closed channel: clear input channel */
+               clear_chnl = true;
+       }
+       if (clear_chnl) {
+               /* Indicate to the DSP we have read the input */
+               IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
+                                                       input_full, 0);
+               sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+       }
+       if (notify_client) {
+               /* Notify client with IO completion record */
+               notify_chnl_complete(pchnl, chnl_packet_obj);
+       }
+func_end:
+       return;
+}
+
+/*
+ *  ======== input_msg ========
+ *      Copies messages from shared memory to the message queues.
+ */
+static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+       u32 num_msgs;
+       u32 i;
+       u8 *msg_input;
+       struct msg_queue *msg_queue_obj;
+       struct msg_frame *pmsg;
+       struct msg_dspmsg msg;
+       struct msg_ctrl *msg_ctr_obj;
+       u32 input_empty;
+       u32 addr;
+
+       msg_ctr_obj = pio_mgr->msg_input_ctrl;
+       /* Get the number of input messages to be read */
+       input_empty =
+           IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
+                        buf_empty);
+       num_msgs =
+           IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
+                        size);
+       if (input_empty)
+               goto func_end;
+
+       msg_input = pio_mgr->msg_input;
+       for (i = 0; i < num_msgs; i++) {
+               /* Read the next message */
+               addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
+               msg.msg.dw_cmd =
+                   read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+               addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
+               msg.msg.dw_arg1 =
+                   read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+               addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
+               msg.msg.dw_arg2 =
+                   read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+               addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
+               msg.msgq_id =
+                   read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+               msg_input += sizeof(struct msg_dspmsg);
+               if (!hmsg_mgr->queue_list)
+                       goto func_end;
+
+               /* Determine which queue to put the message in */
+               msg_queue_obj =
+                   (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
+               dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
+                       "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
+                       msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
+               /*
+                * Interrupt may occur before shared memory and message
+                * input locations have been set up. If all nodes were
+                * cleaned up, hmsg_mgr->max_msgs should be 0.
+                */
+               while (msg_queue_obj != NULL) {
+                       if (msg.msgq_id == msg_queue_obj->msgq_id) {
+                               /* Found it */
+                               if (msg.msg.dw_cmd == RMS_EXITACK) {
+                                       /*
+                                        * Call the node exit notification.
+                                        * The exit message does not get
+                                        * queued.
+                                        */
+                                       (*hmsg_mgr->on_exit) ((void *)
+                                                          msg_queue_obj->arg,
+                                                          msg.msg.dw_arg1);
+                               } else {
+                                       /*
+                                        * Not an exit acknowledgement, queue
+                                        * the message.
+                                        */
+                                       if (!msg_queue_obj->msg_free_list)
+                                               goto func_end;
+                                       pmsg = (struct msg_frame *)lst_get_head
+                                           (msg_queue_obj->msg_free_list);
+                                       if (msg_queue_obj->msg_used_list
+                                           && pmsg) {
+                                               pmsg->msg_data = msg;
+                                               lst_put_tail
+                                                (msg_queue_obj->msg_used_list,
+                                                    (struct list_head *)pmsg);
+                                               ntfy_notify
+                                                   (msg_queue_obj->ntfy_obj,
+                                                    DSP_NODEMESSAGEREADY);
+                                               sync_set_event
+                                                   (msg_queue_obj->sync_event);
+                                       } else {
+                                               /*
+                                                * No free frame to copy the
+                                                * message into.
+                                                */
+                                               pr_err("%s: no free msg frames,"
+                                                      " discarding msg\n",
+                                                      __func__);
+                                       }
+                               }
+                               break;
+                       }
+
+                       if (!hmsg_mgr->queue_list || !msg_queue_obj)
+                               goto func_end;
+                       msg_queue_obj =
+                           (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
+                                                        (struct list_head *)
+                                                        msg_queue_obj);
+               }
+       }
+       /* Set the post SWI flag */
+       if (num_msgs > 0) {
+               /* Tell the DSP we've read the messages */
+               IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                            msg_ctr_obj, buf_empty, true);
+               IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                            msg_ctr_obj, post_swi, true);
+               sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+       }
+func_end:
+       return;
+}
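+
+/*
+ * Illustrative only -- the per-field reads above compute each field's
+ * shared-memory address from the struct layout.  The same arithmetic
+ * written with offsetof(), standalone and with a hypothetical struct
+ * and base address (not the driver's real msg_dspmsg):
+ */
+#include <stdio.h>
+#include <stddef.h>
+
+struct demo_dspmsg {
+        unsigned int dw_cmd;
+        unsigned int dw_arg1;
+        unsigned int dw_arg2;
+        unsigned int msgq_id;
+};
+
+int main(void)
+{
+        unsigned long base = 0x48000000;       /* hypothetical msg_input */
+
+        printf("dw_cmd  @ %#lx\n", base + offsetof(struct demo_dspmsg, dw_cmd));
+        printf("dw_arg1 @ %#lx\n", base + offsetof(struct demo_dspmsg, dw_arg1));
+        printf("dw_arg2 @ %#lx\n", base + offsetof(struct demo_dspmsg, dw_arg2));
+        printf("msgq_id @ %#lx\n", base + offsetof(struct demo_dspmsg, msgq_id));
+        return 0;
+}
+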
+
+/*
+ *  ======== notify_chnl_complete ========
+ *  Purpose:
+ *      Signal the channel event, notifying the client that I/O has completed.
+ */
+static void notify_chnl_complete(struct chnl_object *pchnl,
+                                struct chnl_irp *chnl_packet_obj)
+{
+       bool signal_event;
+
+       if (!pchnl || !pchnl->sync_event ||
+           !pchnl->pio_completions || !chnl_packet_obj)
+               goto func_end;
+
+       /*
+        * Note: we signal the channel event only if the queue of IO
+        * completions is empty.  If it is not empty, the event is sure to be
+        * signalled by the only IO completion list consumer:
+        * bridge_chnl_get_ioc().
+        */
+       signal_event = LST_IS_EMPTY(pchnl->pio_completions);
+       /* Enqueue the IO completion info for the client */
+       lst_put_tail(pchnl->pio_completions,
+                    (struct list_head *)chnl_packet_obj);
+       pchnl->cio_cs++;
+
+       if (pchnl->cio_cs > pchnl->chnl_packets)
+               goto func_end;
+       /* Signal the channel event (if not already set) that IO is complete */
+       if (signal_event)
+               sync_set_event(pchnl->sync_event);
+
+       /* Notify that IO is complete */
+       ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
+func_end:
+       return;
+}
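+
+/*
+ * Illustrative only -- notify_chnl_complete() signals the event only
+ * when the completion list was empty; a non-empty list means the
+ * consumer (bridge_chnl_get_ioc()) is already guaranteed to run.  A
+ * standalone sketch of the same guard (counts hypothetical):
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        int completions = 0;           /* queue depth */
+        int i;
+
+        for (i = 0; i < 3; i++) {
+                int signal_event = (completions == 0); /* empty -> signal */
+
+                completions++;
+                printf("enqueue #%d, signal=%d\n", completions, signal_event);
+        }
+        return 0;                      /* only the first enqueue signals */
+}
+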
+
+/*
+ *  ======== output_chnl ========
+ *  Purpose:
+ *      Dispatch a buffer on an output channel.
+ */
+static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
+                       u8 iMode)
+{
+       struct chnl_mgr *chnl_mgr_obj;
+       struct shm *sm;
+       u32 chnl_id;
+       struct chnl_irp *chnl_packet_obj;
+       u32 dw_dsp_f_mask;
+
+       chnl_mgr_obj = pio_mgr->hchnl_mgr;
+       sm = pio_mgr->shared_mem;
+       /* Attempt to perform output */
+       if (IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_full))
+               goto func_end;
+
+       if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
+               goto func_end;
+
+       /* Look to see if both a PC and DSP output channel are ready */
+       dw_dsp_f_mask = IO_GET_VALUE(pio_mgr->hbridge_context, struct shm, sm,
+                                    dsp_free_mask);
+       chnl_id =
+           find_ready_output(chnl_mgr_obj, pchnl,
+                             (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
+       if (chnl_id == OUTPUTNOTREADY)
+               goto func_end;
+
+       pchnl = chnl_mgr_obj->ap_channel[chnl_id];
+       if (!pchnl || !pchnl->pio_requests) {
+               /* Shouldn't get here */
+               goto func_end;
+       }
+       /* Get the I/O request, and attempt a transfer */
+       chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
+       if (!chnl_packet_obj)
+               goto func_end;
+
+       pchnl->cio_reqs--;
+       if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
+               goto func_end;
+
+       /* Record fact that no more I/O buffers available */
+       if (LST_IS_EMPTY(pchnl->pio_requests))
+               chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+
+       /* Transfer buffer to DSP side */
+       chnl_packet_obj->byte_size =
+           write_data(pio_mgr->hbridge_context, pio_mgr->output,
+                      chnl_packet_obj->host_sys_buf, min(pio_mgr->usm_buf_size,
+                                                 chnl_packet_obj->byte_size));
+       pchnl->bytes_moved += chnl_packet_obj->byte_size;
+       /* Write all 32 bits of arg */
+       IO_SET_LONG(pio_mgr->hbridge_context, struct shm, sm, arg,
+                   chnl_packet_obj->dw_arg);
+#if _CHNL_WORDSIZE == 2
+       IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_id,
+                    (u16) chnl_id);
+       IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_size,
+                    (u16) (chnl_packet_obj->byte_size +
+                           (chnl_mgr_obj->word_size -
+                            1)) / (u16) chnl_mgr_obj->word_size);
+#else
+       IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_id,
+                                                               chnl_id);
+       IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_size,
+                    (chnl_packet_obj->byte_size +
+                     (chnl_mgr_obj->word_size - 1)) / chnl_mgr_obj->word_size);
+#endif
+       IO_SET_VALUE(pio_mgr->hbridge_context, struct shm, sm, output_full, 1);
+       /* Indicate to the DSP we have written the output */
+       sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+       /* Notify client with IO completion record (keep EOS) */
+       chnl_packet_obj->status &= CHNL_IOCSTATEOS;
+       notify_chnl_complete(pchnl, chnl_packet_obj);
+       /* Notify if stream is done. */
+       if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
+               ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
+
+func_end:
+       return;
+}
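+
+/*
+ * Illustrative only -- output_size above is the byte count rounded up
+ * to whole DSP words.  A standalone sketch of the round-up division
+ * (word size hypothetical):
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned int word_size = 2;    /* e.g. a 16-bit DSP word */
+        unsigned int bytes = 7;
+
+        /* (7 + 2 - 1) / 2 = 4 words: never truncates a partial word */
+        printf("%u bytes -> %u words\n", bytes,
+               (bytes + word_size - 1) / word_size);
+        return 0;
+}
+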
+
+/*
+ *  ======== output_msg ========
+ *      Copies messages from the message queues to the shared memory.
+ */
+static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
+{
+       u32 num_msgs = 0;
+       u32 i;
+       u8 *msg_output;
+       struct msg_frame *pmsg;
+       struct msg_ctrl *msg_ctr_obj;
+       u32 output_empty;
+       u32 val;
+       u32 addr;
+
+       msg_ctr_obj = pio_mgr->msg_output_ctrl;
+
+       /* Check if output has been cleared */
+       output_empty =
+           IO_GET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl, msg_ctr_obj,
+                        buf_empty);
+       if (output_empty) {
+               num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
+                   hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
+               msg_output = pio_mgr->msg_output;
+               /* Copy num_msgs messages into shared memory */
+               for (i = 0; i < num_msgs; i++) {
+                       if (!hmsg_mgr->msg_used_list) {
+                               pmsg = NULL;
+                               goto func_end;
+                       } else {
+                               pmsg = (struct msg_frame *)
+                                   lst_get_head(hmsg_mgr->msg_used_list);
+                       }
+                       if (pmsg != NULL) {
+                               val = (pmsg->msg_data).msgq_id;
+                               addr = (u32) &(((struct msg_dspmsg *)
+                                                msg_output)->msgq_id);
+                               write_ext32_bit_dsp_data(
+                                       pio_mgr->hbridge_context, addr, val);
+                               val = (pmsg->msg_data).msg.dw_cmd;
+                               addr = (u32) &((((struct msg_dspmsg *)
+                                                 msg_output)->msg).dw_cmd);
+                               write_ext32_bit_dsp_data(
+                                       pio_mgr->hbridge_context, addr, val);
+                               val = (pmsg->msg_data).msg.dw_arg1;
+                               addr = (u32) &((((struct msg_dspmsg *)
+                                                 msg_output)->msg).dw_arg1);
+                               write_ext32_bit_dsp_data(
+                                       pio_mgr->hbridge_context, addr, val);
+                               val = (pmsg->msg_data).msg.dw_arg2;
+                               addr = (u32) &((((struct msg_dspmsg *)
+                                                 msg_output)->msg).dw_arg2);
+                               write_ext32_bit_dsp_data(
+                                       pio_mgr->hbridge_context, addr, val);
+                               msg_output += sizeof(struct msg_dspmsg);
+                               if (!hmsg_mgr->msg_free_list)
+                                       goto func_end;
+                               lst_put_tail(hmsg_mgr->msg_free_list,
+                                            (struct list_head *)pmsg);
+                               sync_set_event(hmsg_mgr->sync_event);
+                       }
+               }
+
+               if (num_msgs > 0) {
+                       hmsg_mgr->msgs_pending -= num_msgs;
+#if _CHNL_WORDSIZE == 2
+                       IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                                    msg_ctr_obj, size, (u16) num_msgs);
+#else
+                       IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                                    msg_ctr_obj, size, num_msgs);
+#endif
+                       IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                                    msg_ctr_obj, buf_empty, false);
+                       /* Set the post SWI flag */
+                       IO_SET_VALUE(pio_mgr->hbridge_context, struct msg_ctrl,
+                                    msg_ctr_obj, post_swi, true);
+                       /* Tell the DSP we have written the output. */
+                       sm_interrupt_dsp(pio_mgr->hbridge_context,
+                                               MBX_PCPY_CLASS);
+               }
+       }
+func_end:
+       return;
+}
+
+/*
+ *  ======== register_shm_segs ========
+ *  Purpose:
+ *      Registers GPP SM segment with CMM.
+ */
+static int register_shm_segs(struct io_mgr *hio_mgr,
+                                   struct cod_manager *cod_man,
+                                   u32 dw_gpp_base_pa)
+{
+       int status = 0;
+       u32 ul_shm0_base = 0;
+       u32 shm0_end = 0;
+       u32 ul_shm0_rsrvd_start = 0;
+       u32 ul_rsrvd_size = 0;
+       u32 ul_gpp_phys;
+       u32 ul_dsp_virt;
+       u32 ul_shm_seg_id0 = 0;
+       u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
+
+       /*
+        * Read address and size info for first SM region.
+        * Get start of 1st SM Heap region.
+        */
+       status =
+           cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
+       if (ul_shm0_base == 0) {
+               status = -EPERM;
+               goto func_end;
+       }
+       /* Get end of 1st SM Heap region */
+       if (DSP_SUCCEEDED(status)) {
+               /* Get start and length of message part of shared memory */
+               status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
+                                          &shm0_end);
+               if (shm0_end == 0) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+       }
+       /* Start of Gpp reserved region */
+       if (DSP_SUCCEEDED(status)) {
+               /* Get start and length of message part of shared memory */
+               status =
+                   cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
+                                     &ul_shm0_rsrvd_start);
+               if (ul_shm0_rsrvd_start == 0) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+       }
+       /* Register with CMM */
+       if (DSP_SUCCEEDED(status)) {
+               status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
+               if (DSP_SUCCEEDED(status)) {
+                       status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
+                                                          CMM_ALLSEGMENTS);
+               }
+       }
+       /* Register new SM region(s) */
+       if (DSP_SUCCEEDED(status) && (shm0_end - ul_shm0_base) > 0) {
+               /* Calc size (bytes) of SM the GPP can alloc from */
+               ul_rsrvd_size =
+                   (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
+               if (ul_rsrvd_size <= 0) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+               /* Calc size of SM DSP can alloc from */
+               ul_dsp_size =
+                   (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
+               if (ul_dsp_size <= 0) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+               /* First TLB entry reserved for Bridge SM use. */
+               ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+               /* Get size in bytes */
+               ul_dsp_virt =
+                   hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
+                   hio_mgr->word_size;
+               /*
+                * Calc byte offset used to convert GPP phys <-> DSP byte
+                * address.
+                */
+               if (dw_gpp_base_pa > ul_dsp_virt)
+                       dw_offset = dw_gpp_base_pa - ul_dsp_virt;
+               else
+                       dw_offset = ul_dsp_virt - dw_gpp_base_pa;
+
+               if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
+                       status = -EPERM;
+                       goto func_end;
+               }
+               /*
+                * Calc Gpp phys base of SM region.
+                * This is actually uncached kernel virtual address.
+                */
+               dw_gpp_base_va =
+                   ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
+                   ul_dsp_virt;
+               /*
+                * Calc Gpp phys base of SM region.
+                * This is the physical address.
+                */
+               dw_gpp_base_pa =
+                   dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
+                   ul_dsp_virt;
+               /* Register SM Segment 0. */
+               status =
+                   cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
+                                          ul_rsrvd_size, dw_offset,
+                                          (dw_gpp_base_pa >
+                                           ul_dsp_virt) ? CMM_ADDTODSPPA :
+                                          CMM_SUBFROMDSPPA,
+                                          (u32) (ul_shm0_base *
+                                                 hio_mgr->word_size),
+                                          ul_dsp_size, &ul_shm_seg_id0,
+                                          dw_gpp_base_va);
+               /* First SM region is seg_id = 1 */
+               if (ul_shm_seg_id0 != 1)
+                       status = -EPERM;
+       }
+func_end:
+       return status;
+}
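+
+/*
+ * Illustrative only -- the GPP <-> DSP conversion above is a fixed
+ * linear offset between the two address spaces.  A standalone sketch
+ * with made-up addresses (not the driver's real mapping):
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned long gpp_base_pa = 0x87000000;        /* hypothetical */
+        unsigned long dsp_virt = 0x20000000;           /* hypothetical */
+        unsigned long offset = gpp_base_pa - dsp_virt; /* GPP > DSP here */
+        unsigned long dsp_addr = 0x20001000;
+
+        /* a DSP-side address maps to GPP physical by adding the offset */
+        printf("dsp %#lx -> gpp %#lx\n", dsp_addr, dsp_addr + offset);
+        return 0;
+}
+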
+
+/*
+ *  ======== read_data ========
+ *      Copies buffers from the shared memory to the host buffer.
+ */
+static u32 read_data(struct bridge_dev_context *hDevContext, void *dest,
+                    void *pSrc, u32 usize)
+{
+       memcpy(dest, pSrc, usize);
+       return usize;
+}
+
+/*
+ *  ======== write_data ========
+ *      Copies buffers from the host side buffer to the shared memory.
+ */
+static u32 write_data(struct bridge_dev_context *hDevContext, void *dest,
+                     void *pSrc, u32 usize)
+{
+       memcpy(dest, pSrc, usize);
+       return usize;
+}
+
+/* ZCPY IO routines. */
+void io_intr_dsp2(IN struct io_mgr *pio_mgr, IN u16 mb_val)
+{
+       sm_interrupt_dsp(pio_mgr->hbridge_context, mb_val);
+}
+
+/*
+ *  ======== io_sh_msetting ========
+ *      Sets the requested shm setting.
+ */
+int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
+{
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 i;
+       struct dspbridge_platform_data *pdata =
+           omap_dspbridge_dev->dev.platform_data;
+
+       switch (desc) {
+       case SHM_CURROPP:
+               /* Update the shared memory with requested OPP information */
+               if (pargs != NULL)
+                       hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
+                           *(u32 *) pargs;
+               else
+                       return -EPERM;
+               break;
+       case SHM_OPPINFO:
+               /*
+                * Update the shared memory with the voltage, frequency,
+                * min and max frequency values for an OPP.
+                */
+               for (i = 0; i <= dsp_max_opps; i++) {
+                       hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+                           voltage = vdd1_dsp_freq[i][0];
+                       dev_dbg(bridge, "OPP-shm: voltage: %d\n",
+                               vdd1_dsp_freq[i][0]);
+                       hio_mgr->shared_mem->opp_table_struct.
+                           opp_point[i].frequency = vdd1_dsp_freq[i][1];
+                       dev_dbg(bridge, "OPP-shm: frequency: %d\n",
+                               vdd1_dsp_freq[i][1]);
+                       hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+                           min_freq = vdd1_dsp_freq[i][2];
+                       dev_dbg(bridge, "OPP-shm: min freq: %d\n",
+                               vdd1_dsp_freq[i][2]);
+                       hio_mgr->shared_mem->opp_table_struct.opp_point[i].
+                           max_freq = vdd1_dsp_freq[i][3];
+                       dev_dbg(bridge, "OPP-shm: max freq: %d\n",
+                               vdd1_dsp_freq[i][3]);
+               }
+               hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
+                   dsp_max_opps;
+               dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
+               /* Update the current OPP number */
+               if (pdata->dsp_get_opp)
+                       i = (*pdata->dsp_get_opp) ();
+               hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
+               dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
+               break;
+       case SHM_GETOPP:
+               /* Get the OPP that DSP has requested */
+               *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
+               break;
+       default:
+               break;
+       }
+#endif
+       return 0;
+}
+
+/*
+ *  ======== bridge_io_get_proc_load ========
+ *      Gets the Processor's Load information
+ */
+int bridge_io_get_proc_load(IN struct io_mgr *hio_mgr,
+                               OUT struct dsp_procloadstat *pProcStat)
+{
+       pProcStat->curr_load = hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
+       pProcStat->predicted_load =
+           hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
+       pProcStat->curr_dsp_freq =
+           hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
+       pProcStat->predicted_freq =
+           hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
+
+       dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
+               "Pred Freq = %d\n", pProcStat->curr_load,
+               pProcStat->predicted_load, pProcStat->curr_dsp_freq,
+               pProcStat->predicted_freq);
+       return 0;
+}
+
+#ifndef DSP_TRACEBUF_DISABLED
+void print_dsp_debug_trace(struct io_mgr *hio_mgr)
+{
+       u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
+
+       while (true) {
+               /* Get the DSP current pointer */
+               ul_gpp_cur_pointer =
+                   *(u32 *) (hio_mgr->ul_trace_buffer_current);
+               ul_gpp_cur_pointer =
+                   hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
+                                         hio_mgr->ul_dsp_va);
+
+               /* No new debug messages available yet */
+               if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+                       break;
+               } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
+                       /* Continuous data */
+                       ul_new_message_length =
+                           ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
+
+                       memcpy(hio_mgr->pmsg,
+                              (char *)hio_mgr->ul_gpp_read_pointer,
+                              ul_new_message_length);
+                       hio_mgr->pmsg[ul_new_message_length] = '\0';
+                       /*
+                        * Advance the GPP trace pointer to DSP current
+                        * pointer.
+                        */
+                       hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
+                       /* Print the trace messages */
+                       pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+               } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
+                       /* Handle trace buffer wraparound */
+                       memcpy(hio_mgr->pmsg,
+                              (char *)hio_mgr->ul_gpp_read_pointer,
+                              hio_mgr->ul_trace_buffer_end -
+                              hio_mgr->ul_gpp_read_pointer);
+                       ul_new_message_length =
+                           ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
+                       memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+                                             hio_mgr->ul_gpp_read_pointer],
+                              (char *)hio_mgr->ul_trace_buffer_begin,
+                              ul_new_message_length);
+                       hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+                                     hio_mgr->ul_gpp_read_pointer +
+                                     ul_new_message_length] = '\0';
+                       /*
+                        * Advance the GPP trace pointer to DSP current
+                        * pointer.
+                        */
+                       hio_mgr->ul_gpp_read_pointer =
+                           hio_mgr->ul_trace_buffer_begin +
+                           ul_new_message_length;
+                       /* Print the trace messages */
+                       pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
+               }
+       }
+}
+#endif
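+
+/*
+ * Illustrative only -- a standalone userspace sketch of the wraparound
+ * copy used above: the tail of the ring buffer is copied first, then
+ * the wrapped head.  Buffer contents and pointers are hypothetical:
+ */
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+        /* oldest data at index 5 ("ABC"), wrapped tail at 0 ("DEF") */
+        char ring[8] = { 'D', 'E', 'F', '?', '?', 'A', 'B', 'C' };
+        unsigned int begin = 0, end = 8, read = 5, cur = 3;
+        char msg[9];
+
+        memcpy(msg, &ring[read], end - read);                /* "ABC" */
+        memcpy(&msg[end - read], &ring[begin], cur - begin); /* "DEF" */
+        msg[(end - read) + (cur - begin)] = '\0';
+
+        printf("trace: %s\n", msg);    /* prints "ABCDEF" */
+        return 0;
+}
+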
+
+/*
+ *  ======== print_dsp_trace_buffer ========
+ *      Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
+ *  Parameters:
+ *    hbridge_context:   Handle to the bridge device context.
+ *  Returns:
+ *      0:        Success.
+ *      -ENOMEM:    Unable to allocate memory.
+ *  Requires:
+ *      hbridge_context must be valid. Checked in bridge_deh_notify.
+ */
+int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
+{
+       int status = 0;
+       struct cod_manager *cod_mgr;
+       u32 ul_trace_end;
+       u32 ul_trace_begin;
+       u32 trace_cur_pos;
+       u32 ul_num_bytes = 0;
+       u32 ul_num_words = 0;
+       u32 ul_word_size = 2;
+       char *psz_buf;
+       char *str_beg;
+       char *trace_end;
+       char *buf_end;
+       char *new_line;
+
+       struct bridge_dev_context *pbridge_context = hbridge_context;
+       struct bridge_drv_interface *intf_fxns;
+       struct dev_object *dev_obj = (struct dev_object *)
+           pbridge_context->hdev_obj;
+
+       status = dev_get_cod_mgr(dev_obj, &cod_mgr);
+
+       if (cod_mgr) {
+               /* Look for SYS_PUTCBEG/SYS_PUTCEND */
+               status =
+                   cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
+       } else {
+               status = -EFAULT;
+       }
+       if (DSP_SUCCEEDED(status))
+               status =
+                   cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
+
+       if (DSP_SUCCEEDED(status))
+               /* trace_cur_pos will hold the address of a DSP pointer */
+               status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
+                                                       &trace_cur_pos);
+
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       ul_num_bytes = (ul_trace_end - ul_trace_begin);
+
+       ul_num_words = ul_num_bytes * ul_word_size;
+       status = dev_get_intf_fxns(dev_obj, &intf_fxns);
+
+       if (DSP_FAILED(status))
+               goto func_end;
+
+       psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
+       if (psz_buf != NULL) {
+               /* Read trace buffer data */
+               status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+                       (u8 *)psz_buf, (u32)ul_trace_begin,
+                       ul_num_bytes, 0);
+
+               if (DSP_FAILED(status))
+                       goto func_end;
+
+               /* Pack and do newline conversion */
+               pr_debug("PrintDspTraceBuffer: "
+                       "before pack and unpack.\n");
+               pr_debug("%s: DSP Trace Buffer Begin:\n"
+                       "=======================\n%s\n",
+                       __func__, psz_buf);
+
+               /* Read the value at the DSP address in trace_cur_pos. */
+               status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+                               (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
+                               4, 0);
+               if (DSP_FAILED(status))
+                       goto func_end;
+               /* Pack and do newline conversion */
+               pr_info("DSP Trace Buffer Begin:\n"
+                       "=======================\n%s\n",
+                       psz_buf);
+
+
+               /* convert to offset */
+               trace_cur_pos = trace_cur_pos - ul_trace_begin;
+
+               if (ul_num_bytes) {
+                       /*
+                        * The buffer is not full; find the end of the
+                        * data -- buf_end will be >= psz_buf after the
+                        * loop.
+                        */
+                       buf_end = &psz_buf[ul_num_bytes+1];
+                       /* DSP print position */
+                       trace_end = &psz_buf[trace_cur_pos];
+
+                       /*
+                        * Search buffer for a new_line and replace it
+                        * with '\0', then print as string.
+                        * Continue until end of buffer is reached.
+                        */
+                       str_beg = trace_end;
+                       ul_num_bytes = buf_end - str_beg;
+
+                       while (str_beg < buf_end) {
+                               new_line = strnchr(str_beg, ul_num_bytes,
+                                                               '\n');
+                               if (new_line && new_line < buf_end) {
+                                       *new_line = 0;
+                                       pr_debug("%s\n", str_beg);
+                                       str_beg = ++new_line;
+                                       ul_num_bytes = buf_end - str_beg;
+                               } else {
+                                       /*
+                                        * Assume buffer empty if it contains
+                                        * a zero
+                                        */
+                                       if (*str_beg != '\0') {
+                                               str_beg[ul_num_bytes] = 0;
+                                               pr_debug("%s\n", str_beg);
+                                       }
+                                       str_beg = buf_end;
+                                       ul_num_bytes = 0;
+                               }
+                       }
+                       /*
+                        * Search buffer for a new_line and replace it
+                        * with '\0', then print as string.
+                        * Continue until buffer is exhausted.
+                        */
+                       str_beg = psz_buf;
+                       ul_num_bytes = trace_end - str_beg;
+
+                       while (str_beg < trace_end) {
+                               new_line = strnchr(str_beg, ul_num_bytes, '\n');
+                               if (new_line != NULL && new_line < trace_end) {
+                                       *new_line = 0;
+                                       pr_debug("%s\n", str_beg);
+                                       str_beg = ++new_line;
+                                       ul_num_bytes = trace_end - str_beg;
+                               } else {
+                                       /*
+                                        * Assume buffer empty if it contains
+                                        * a zero
+                                        */
+                                       if (*str_beg != '\0') {
+                                               str_beg[ul_num_bytes] = 0;
+                                               pr_debug("%s\n", str_beg);
+                                       }
+                                       str_beg = trace_end;
+                                       ul_num_bytes = 0;
+                               }
+                       }
+               }
+               pr_info("\n=======================\n"
+                       "DSP Trace Buffer End:\n");
+               kfree(psz_buf);
+       } else {
+               status = -ENOMEM;
+       }
+func_end:
+       if (DSP_FAILED(status))
+               dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
+       return status;
+}
+
+void io_sm_init(void)
+{
+       /* Do nothing */
+}
+/**
+ * dump_dsp_stack() - This function dumps the data on the DSP stack.
+ * @bridge_context:    Bridge driver's device context pointer.
+ *
+ */
+int dump_dsp_stack(struct bridge_dev_context *bridge_context)
+{
+       int status = 0;
+       struct cod_manager *code_mgr;
+       struct node_mgr *node_mgr;
+       u32 trace_begin;
+       char name[256];
+       struct {
+               u32 head[2];
+               u32 size;
+       } mmu_fault_dbg_info;
+       u32 *buffer;
+       u32 *buffer_beg;
+       u32 *buffer_end;
+       u32 exc_type;
+       u32 dyn_ext_base;
+       u32 i;
+       u32 offset_output;
+       u32 total_size;
+       u32 poll_cnt;
+       const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
+                               "IRP", "NRP", "AMR", "SSR",
+                               "ILC", "RILC", "IER", "CSR"};
+       const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
+       struct bridge_drv_interface *intf_fxns;
+       struct dev_object *dev_object = bridge_context->hdev_obj;
+
+       status = dev_get_cod_mgr(dev_object, &code_mgr);
+       if (!code_mgr) {
+               pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+               status = -EFAULT;
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               status = dev_get_node_manager(dev_object, &node_mgr);
+               if (!node_mgr) {
+                       pr_debug("%s: Failed on dev_get_node_manager.\n",
+                                                               __func__);
+                       status = -EFAULT;
+               }
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
+               status =
+                       cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
+               pr_debug("%s: trace_begin Value 0x%x\n",
+                       __func__, trace_begin);
+               if (DSP_FAILED(status))
+                       pr_debug("%s: Failed on cod_get_sym_value.\n",
+                                                               __func__);
+       }
+       if (DSP_SUCCEEDED(status))
+               status = dev_get_intf_fxns(dev_object, &intf_fxns);
+       /*
+        * Check for the "magic number" in the trace buffer.  If it has
+        * yet to appear then poll the trace buffer to wait for it.  Its
+        * appearance signals that the DSP has finished dumping its state.
+        */
+       mmu_fault_dbg_info.head[0] = 0;
+       mmu_fault_dbg_info.head[1] = 0;
+       if (DSP_SUCCEEDED(status)) {
+               poll_cnt = 0;
+               while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
+                       mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
+                       poll_cnt < POLL_MAX) {
+
+                       /* Read DSP dump size from the DSP trace buffer... */
+                       status = (*intf_fxns->pfn_brd_read)(bridge_context,
+                               (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
+                               sizeof(mmu_fault_dbg_info), 0);
+
+                       if (DSP_FAILED(status))
+                               break;
+
+                       poll_cnt++;
+               }
+
+               if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 &&
+                       mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
+                       status = -ETIME;
+                       pr_err("%s:No DSP MMU-Fault information available.\n",
+                                                       __func__);
+               }
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               total_size = mmu_fault_dbg_info.size;
+               /* Limit the size in case DSP went crazy */
+               if (total_size > MAX_MMU_DBGBUFF)
+                       total_size = MAX_MMU_DBGBUFF;
+
+               buffer = kzalloc(total_size, GFP_ATOMIC);
+               if (!buffer) {
+                       status = -ENOMEM;
+                       pr_debug("%s: Failed to "
+                               "allocate stack dump buffer.\n", __func__);
+                       goto func_end;
+               }
+
+               buffer_beg = buffer;
+               buffer_end =  buffer + total_size / 4;
+
+               /* Read bytes from the DSP trace buffer... */
+               status = (*intf_fxns->pfn_brd_read)(bridge_context,
+                               (u8 *)buffer, (u32)trace_begin,
+                               total_size, 0);
+               if (DSP_FAILED(status)) {
+                       pr_debug("%s: Failed to Read Trace Buffer.\n",
+                                                               __func__);
+                       goto func_end;
+               }
+
+               pr_err("\nAproximate Crash Position:\n"
+                       "--------------------------\n");
+
+               exc_type = buffer[3];
+               if (!exc_type)
+                       i = buffer[79];         /* IRP */
+               else
+                       i = buffer[80];         /* NRP */
+
+               status =
+                   cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
+               if (DSP_FAILED(status)) {
+                       status = -EFAULT;
+                       goto func_end;
+               }
+
+               if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
+                       0x1000, &offset_output, name) == 0))
+                       pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
+                                                       i - offset_output);
+               else
+                       pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
+
+               buffer += 4;
+
+               pr_err("\nExecution Info:\n"
+                       "---------------\n");
+
+               if (*buffer < ARRAY_SIZE(exec_ctxt)) {
+                       pr_err("Execution context \t%s\n",
+                               exec_ctxt[*buffer++]);
+               } else {
+                       pr_err("Execution context corrupt\n");
+                       kfree(buffer_beg);
+                       return -EFAULT;
+               }
+               pr_err("Task Handle\t\t0x%x\n", *buffer++);
+               pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
+               pr_err("Stack Top\t\t0x%x\n", *buffer++);
+               pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
+               pr_err("Stack Size\t\t0x%x\n", *buffer++);
+               pr_err("Stack Size In Use\t0x%x\n", *buffer++);
+
+               pr_err("\nCPU Registers\n"
+                       "---------------\n");
+
+               for (i = 0; i < 32; i++) {
+                       if (i == 4 || i == 6 || i == 8)
+                               pr_err("A%d 0x%-8x [Function Argument %d]\n",
+                                                       i, *buffer++, i-3);
+                       else if (i == 15)
+                               pr_err("A15 0x%-8x [Frame Pointer]\n",
+                                                               *buffer++);
+                       else
+                               pr_err("A%d 0x%x\n", i, *buffer++);
+               }
+
+               pr_err("\nB0 0x%x\n", *buffer++);
+               pr_err("B1 0x%x\n", *buffer++);
+               pr_err("B2 0x%x\n", *buffer++);
+
+               if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
+                       *buffer, 0x1000, &offset_output, name) == 0))
+
+                       pr_err("B3 0x%-8x [Function Return Pointer:"
+                               " \"%s\" + 0x%x]\n", *buffer, name,
+                               *buffer - offset_output);
+               else
+                       pr_err("B3 0x%-8x [Function Return Pointer:"
+                               "Unable to match to a symbol.]\n", *buffer);
+
+               buffer++;
+
+               for (i = 4; i < 32; i++) {
+                       if (i == 4 || i == 6 || i == 8)
+                               pr_err("B%d 0x%-8x [Function Argument %d]\n",
+                                                       i, *buffer++, i-2);
+                       else if (i == 14)
+                               pr_err("B14 0x%-8x [Data Page Pointer]\n",
+                                                               *buffer++);
+                       else
+                               pr_err("B%d 0x%x\n", i, *buffer++);
+               }
+
+               pr_err("\n");
+
+               for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
+                       pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
+
+               pr_err("\nStack:\n"
+                       "------\n");
+
+               for (i = 0; buffer < buffer_end; i++, buffer++) {
+                       if ((*buffer > dyn_ext_base) && (
+                               node_find_addr(node_mgr, *buffer, 0x600,
+                               &offset_output, name) == 0))
+                               pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
+                                       i, *buffer, name,
+                                       *buffer - offset_output);
+                       else
+                               pr_err("[%d] 0x%x\n", i, *buffer);
+               }
+               kfree(buffer_beg);
+       }
+func_end:
+       return status;
+}
+
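For reference, the parser above implies the following layout for the DSP's crash-dump area. This is reconstructed purely from the word offsets the code reads, not from any documented format, so treat every offset as an inference:

    /*
     * Inferred crash-dump layout (32-bit words; offsets deduced from
     * the indices used above, not authoritative):
     *
     *   words 0-1    MMU_FAULT_HEAD1/2 magic - dump-complete marker
     *   word  2      dump size in bytes (capped at MAX_MMU_DBGBUFF)
     *   word  3      exception type (0 selects IRP, non-zero NRP)
     *   words 4-10   execution context, task handle, stack pointer,
     *                stack top/bottom/size/size-in-use
     *   words 11-42  core registers A0-A31
     *   words 43-74  core registers B0-B31
     *   words 75+    control registers named in dsp_regs[], with IRP
     *                at word 79 and NRP at word 80
     *   remainder    raw stack words, matched against symbols where
     *                possible
     */
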
+/**
+ * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side
+ * @bridge_context:            Bridge driver's device context pointer.
+ *
+ */
+void dump_dl_modules(struct bridge_dev_context *bridge_context)
+{
+       struct cod_manager *code_mgr;
+       struct bridge_drv_interface *intf_fxns;
+       struct bridge_dev_context *bridge_ctxt = bridge_context;
+       struct dev_object *dev_object = bridge_ctxt->hdev_obj;
+       struct modules_header modules_hdr;
+       struct dll_module *module_struct = NULL;
+       u32 module_dsp_addr;
+       u32 module_size;
+       u32 module_struct_size = 0;
+       u32 sect_ndx;
+       char *sect_str;
+       int status = 0;
+
+       status = dev_get_intf_fxns(dev_object, &intf_fxns);
+       if (DSP_FAILED(status)) {
+               pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
+               goto func_end;
+       }
+
+       status = dev_get_cod_mgr(dev_object, &code_mgr);
+       if (!code_mgr) {
+               pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       /* Look up the address of the modules_header structure */
+       status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
+       if (DSP_FAILED(status)) {
+               pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
+                       __func__);
+               goto func_end;
+       }
+
+       pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
+
+       /* Copy the modules_header structure from DSP memory. */
+       status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
+                               (u32) module_dsp_addr, sizeof(modules_hdr), 0);
+
+       if (DSP_FAILED(status)) {
+               pr_debug("%s: Failed failed to read modules header.\n",
+                                                               __func__);
+               goto func_end;
+       }
+
+       module_dsp_addr = modules_hdr.first_module;
+       module_size = modules_hdr.first_module_size;
+
+       pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
+                                                               module_size);
+
+       pr_err("\nDynamically Loaded Modules:\n"
+               "---------------------------\n");
+
+       /* For each dll_module structure in the list... */
+       while (module_size) {
+               /*
+                * Allocate/re-allocate memory to hold the dll_module
+                * structure. The memory is re-allocated only if the existing
+                * allocation is too small.
+                */
+               if (module_size > module_struct_size) {
+                       kfree(module_struct);
+                       module_struct = kzalloc(module_size+128, GFP_ATOMIC);
+                       module_struct_size = module_size+128;
+                       pr_debug("%s: allocated module struct %p %d\n",
+                               __func__, module_struct, module_struct_size);
+                       if (!module_struct)
+                               goto func_end;
+               }
+               /* Copy the dll_module structure from DSP memory */
+               status = (*intf_fxns->pfn_brd_read)(bridge_context,
+                       (u8 *)module_struct, module_dsp_addr, module_size, 0);
+
+               if (DSP_FAILED(status)) {
+                       pr_debug("%s: Failed to read dll_module struct for 0x%x.\n",
+                               __func__, module_dsp_addr);
+                       break;
+               }
+
+               /* Update info regarding the _next_ module in the list. */
+               module_dsp_addr = module_struct->next_module;
+               module_size = module_struct->next_module_size;
+
+               pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
+                       __func__, module_dsp_addr, module_size,
+                       module_struct->num_sects);
+
+               /*
+                * The section name strings start immediately following
+                * the array of dll_sect structures.
+                */
+               sect_str = (char *) &module_struct->
+                                       sects[module_struct->num_sects];
+               pr_err("%s\n", sect_str);
+
+               /*
+                * Advance to the first section name string.
+                * Each string follows the one before.
+                */
+               sect_str += strlen(sect_str) + 1;
+
+               /* Access each dll_sect structure and its name string. */
+               for (sect_ndx = 0;
+                       sect_ndx < module_struct->num_sects; sect_ndx++) {
+                       pr_err("    Section: 0x%x ",
+                               module_struct->sects[sect_ndx].sect_load_adr);
+
+                       if (((u32) sect_str - (u32) module_struct) <
+                               module_struct_size) {
+                               pr_err("%s\n", sect_str);
+                               /* Each string follows the one before. */
+                               sect_str += strlen(sect_str)+1;
+                       } else {
+                               pr_err("<string error>\n");
+                               pr_debug("%s: section name sting address "
+                                       "is invalid %p\n", __func__, sect_str);
+                       }
+               }
+       }
+func_end:
+       kfree(module_struct);
+}
+
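As a compact illustration of the traversal dump_dl_modules() performs, the sketch below walks the same list with error handling and section printing stripped out. read_dsp() is a hypothetical stand-in for (*intf_fxns->pfn_brd_read), and the struct fields are exactly the ones dereferenced above:

    static void walk_dl_modules(u32 dlmodules_addr)
    {
            struct modules_header hdr;
            struct dll_module *mod;
            u32 addr, size;

            /* Head of the list lives at the _DLModules symbol. */
            read_dsp(&hdr, dlmodules_addr, sizeof(hdr));
            addr = hdr.first_module;
            size = hdr.first_module_size;

            while (size) {          /* a zero size terminates the list */
                    mod = kzalloc(size, GFP_ATOMIC);
                    if (!mod)
                            return;
                    read_dsp(mod, addr, size);
                    /* Section records and name strings follow mod->sects[] */
                    addr = mod->next_module;
                    size = mod->next_module_size;
                    kfree(mod);
            }
    }
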
diff --git a/drivers/staging/tidspbridge/core/mmu_fault.c b/drivers/staging/tidspbridge/core/mmu_fault.c
new file mode 100644 (file)
index 0000000..5c0124f
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * mmu_fault.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements DSP MMU fault handling functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/host_os.h>
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/drv.h>
+
+/*  ----------------------------------- Link Driver */
+#include <dspbridge/dspdeh.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/*  ----------------------------------- This */
+#include "_deh.h"
+#include <dspbridge/cfg.h>
+#include "_tiomap.h"
+#include "mmu_fault.h"
+
+static u32 dmmu_event_mask;
+u32 fault_addr;
+
+static bool mmu_check_if_fault(struct bridge_dev_context *dev_context);
+
+/*
+ *  ======== mmu_fault_dpc ========
+ *      Deferred procedure call to handle DSP MMU fault.
+ */
+void mmu_fault_dpc(IN unsigned long pRefData)
+{
+       struct deh_mgr *hdeh_mgr = (struct deh_mgr *)pRefData;
+
+       if (hdeh_mgr)
+               bridge_deh_notify(hdeh_mgr, DSP_MMUFAULT, 0L);
+}
+
+/*
+ *  ======== mmu_fault_isr ========
+ *      ISR to be triggered by a DSP MMU fault interrupt.
+ */
+irqreturn_t mmu_fault_isr(int irq, IN void *pRefData)
+{
+       struct deh_mgr *deh_mgr_obj = (struct deh_mgr *)pRefData;
+       struct bridge_dev_context *dev_context;
+       struct cfg_hostres *resources;
+
+       DBC_REQUIRE(irq == INT_DSP_MMU_IRQ);
+       DBC_REQUIRE(deh_mgr_obj);
+
+       if (deh_mgr_obj) {
+
+               dev_context =
+                   (struct bridge_dev_context *)deh_mgr_obj->hbridge_context;
+
+               resources = dev_context->resources;
+
+               if (!resources) {
+                       dev_dbg(bridge, "%s: Failed to get Host Resources\n",
+                               __func__);
+                       return IRQ_HANDLED;
+               }
+               if (mmu_check_if_fault(dev_context)) {
+                       printk(KERN_INFO "***** DSPMMU FAULT ***** IRQStatus "
+                              "0x%x\n", dmmu_event_mask);
+                       printk(KERN_INFO "***** DSPMMU FAULT ***** fault_addr "
+                              "0x%x\n", fault_addr);
+                       /*
+                        * Schedule a DPC directly. In the future, it may be
+                        * necessary to check if DSP MMU fault is intended for
+                        * Bridge.
+                        */
+                       tasklet_schedule(&deh_mgr_obj->dpc_tasklet);
+
+                       /* Reset err_info structure before use. */
+                       deh_mgr_obj->err_info.dw_err_mask = DSP_MMUFAULT;
+                       deh_mgr_obj->err_info.dw_val1 = fault_addr >> 16;
+                       deh_mgr_obj->err_info.dw_val2 = fault_addr & 0xFFFF;
+                       deh_mgr_obj->err_info.dw_val3 = 0L;
+                       /* Disable the MMU events, else once we clear it will
+                        * start to raise INTs again */
+                       hw_mmu_event_disable(resources->dw_dmmu_base,
+                                            HW_MMU_TRANSLATION_FAULT);
+               } else {
+                       hw_mmu_event_disable(resources->dw_dmmu_base,
+                                            HW_MMU_ALL_INTERRUPTS);
+               }
+       }
+       return IRQ_HANDLED;
+}
+
+/*
+ *  ======== mmu_check_if_fault ========
+ *      Check whether the MMU fault is a valid TLB miss from the DSP.
+ *  Note: This function is called from an ISR
+ */
+static bool mmu_check_if_fault(struct bridge_dev_context *dev_context)
+{
+       bool ret = false;
+       hw_status hw_status_obj;
+       struct cfg_hostres *resources = dev_context->resources;
+
+       if (!resources) {
+               dev_dbg(bridge, "%s: Failed to get Host Resources in\n",
+                       __func__);
+               return ret;
+       }
+       hw_status_obj =
+           hw_mmu_event_status(resources->dw_dmmu_base, &dmmu_event_mask);
+       if (dmmu_event_mask == HW_MMU_TRANSLATION_FAULT) {
+               hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
+               ret = true;
+       }
+       return ret;
+}
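For context, the ISR/DPC pair above is expected to be wired together by the DEH manager; the registration itself lives outside this file (presumably in ue_deh.c), so the fragment below is only a sketch using standard kernel APIs:

    /* Setup (sketch): bind the tasklet, then claim the MMU fault IRQ. */
    tasklet_init(&deh_mgr->dpc_tasklet, mmu_fault_dpc,
                 (unsigned long)deh_mgr);
    status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
                         "DspBridge\tiommu fault", (void *)deh_mgr);

    /* Teardown (sketch): release the IRQ before killing the tasklet. */
    free_irq(INT_DSP_MMU_IRQ, deh_mgr);
    tasklet_kill(&deh_mgr->dpc_tasklet);
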
diff --git a/drivers/staging/tidspbridge/core/mmu_fault.h b/drivers/staging/tidspbridge/core/mmu_fault.h
new file mode 100644 (file)
index 0000000..74db489
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * mmu_fault.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Defines DSP MMU fault handling functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MMU_FAULT_
+#define MMU_FAULT_
+
+extern u32 fault_addr;
+
+/*
+ *  ======== mmu_fault_dpc ========
+ *      Deferred procedure call to handle DSP MMU fault.
+ */
+void mmu_fault_dpc(IN unsigned long pRefData);
+
+/*
+ *  ======== mmu_fault_isr ========
+ *      ISR to be triggered by a DSP MMU fault interrupt.
+ */
+irqreturn_t mmu_fault_isr(int irq, IN void *pRefData);
+
+#endif /* MMU_FAULT_ */
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
new file mode 100644 (file)
index 0000000..7c6d6cc
--- /dev/null
@@ -0,0 +1,673 @@
+/*
+ * msg_sm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge functions for Bridge message module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/list.h>
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- Others */
+#include <dspbridge/io_sm.h>
+
+/*  ----------------------------------- This */
+#include <_msg_sm.h>
+#include <dspbridge/dspmsg.h>
+
+/*  ----------------------------------- Function Prototypes */
+static int add_new_msg(struct lst_list *msgList);
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 uNumToDSP);
+static void free_msg_list(struct lst_list *msgList);
+
+/*
+ *  ======== bridge_msg_create ========
+ *      Create an object to manage message queues. Only one of these objects
+ *      can exist per device object.
+ */
+int bridge_msg_create(OUT struct msg_mgr **phMsgMgr,
+                            struct dev_object *hdev_obj,
+                            msg_onexit msgCallback)
+{
+       struct msg_mgr *msg_mgr_obj;
+       struct io_mgr *hio_mgr;
+       int status = 0;
+
+       if (!phMsgMgr || !msgCallback || !hdev_obj) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       dev_get_io_mgr(hdev_obj, &hio_mgr);
+       if (!hio_mgr) {
+               status = -EFAULT;
+               goto func_end;
+       }
+       *phMsgMgr = NULL;
+       /* Allocate msg_ctrl manager object */
+       msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
+
+       if (msg_mgr_obj) {
+               msg_mgr_obj->on_exit = msgCallback;
+               msg_mgr_obj->hio_mgr = hio_mgr;
+               /* List of MSG_QUEUEs */
+               msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
+                                                       GFP_KERNEL);
+               /*  Queues of message frames for messages to the DSP. Message
+                * frames will only be added to the free queue when a
+                * msg_queue object is created. */
+               msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
+                                                       GFP_KERNEL);
+               msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
+                                                       GFP_KERNEL);
+               if (msg_mgr_obj->queue_list == NULL ||
+                   msg_mgr_obj->msg_free_list == NULL ||
+                   msg_mgr_obj->msg_used_list == NULL) {
+                       status = -ENOMEM;
+               } else {
+                       INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
+                       INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
+                       INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
+                       spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
+               }
+
+               /*  Create an event to be used by bridge_msg_put() in waiting
+                *  for an available free frame from the message manager. */
+               msg_mgr_obj->sync_event =
+                               kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+               if (!msg_mgr_obj->sync_event)
+                       status = -ENOMEM;
+               else
+                       sync_init_event(msg_mgr_obj->sync_event);
+
+               if (DSP_SUCCEEDED(status))
+                       *phMsgMgr = msg_mgr_obj;
+               else
+                       delete_msg_mgr(msg_mgr_obj);
+
+       } else {
+               status = -ENOMEM;
+       }
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_msg_create_queue ========
+ *      Create a msg_queue for sending/receiving messages to/from a node
+ *      on the DSP.
+ */
+int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
+                               OUT struct msg_queue **phMsgQueue,
+                               u32 msgq_id, u32 max_msgs, void *arg)
+{
+       u32 i;
+       u32 num_allocated = 0;
+       struct msg_queue *msg_q;
+       int status = 0;
+
+       if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       *phMsgQueue = NULL;
+       /* Allocate msg_queue object */
+       msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
+       if (!msg_q) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+       lst_init_elem((struct list_head *)msg_q);
+       msg_q->max_msgs = max_msgs;
+       msg_q->hmsg_mgr = hmsg_mgr;
+       msg_q->arg = arg;       /* Node handle */
+       msg_q->msgq_id = msgq_id;       /* Node env (not valid yet) */
+       /* Queues of Message frames for messages from the DSP */
+       msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+       msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
+       if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
+               status = -ENOMEM;
+       else {
+               INIT_LIST_HEAD(&msg_q->msg_free_list->head);
+               INIT_LIST_HEAD(&msg_q->msg_used_list->head);
+       }
+
+       /*  Create event that will be signalled when a message from
+        *  the DSP is available. */
+       if (DSP_SUCCEEDED(status)) {
+               msg_q->sync_event = kzalloc(sizeof(struct sync_object),
+                                                       GFP_KERNEL);
+               if (msg_q->sync_event)
+                       sync_init_event(msg_q->sync_event);
+               else
+                       status = -ENOMEM;
+       }
+
+       /* Create a notification list for message ready notification. */
+       if (DSP_SUCCEEDED(status)) {
+               msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
+                                                       GFP_KERNEL);
+               if (msg_q->ntfy_obj)
+                       ntfy_init(msg_q->ntfy_obj);
+               else
+                       status = -ENOMEM;
+       }
+
+       /*  Create events that will be used to synchronize cleanup
+        *  when the object is deleted. sync_done will be set to
+        *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
+        *  will be set by the unblocked thread to signal that it
+        *  is unblocked and will no longer reference the object. */
+       if (DSP_SUCCEEDED(status)) {
+               msg_q->sync_done = kzalloc(sizeof(struct sync_object),
+                                                       GFP_KERNEL);
+               if (msg_q->sync_done)
+                       sync_init_event(msg_q->sync_done);
+               else
+                       status = -ENOMEM;
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
+                                                       GFP_KERNEL);
+               if (msg_q->sync_done_ack)
+                       sync_init_event(msg_q->sync_done_ack);
+               else
+                       status = -ENOMEM;
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               /* Enter critical section */
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+               /* Initialize message frames and put in appropriate queues */
+               for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
+                       status = add_new_msg(hmsg_mgr->msg_free_list);
+                       if (DSP_SUCCEEDED(status)) {
+                               num_allocated++;
+                               status = add_new_msg(msg_q->msg_free_list);
+                       }
+               }
+               if (DSP_FAILED(status)) {
+                       /*  Stay inside CS to prevent others from taking any
+                        *  of the newly allocated message frames. */
+                       delete_msg_queue(msg_q, num_allocated);
+               } else {
+                       lst_put_tail(hmsg_mgr->queue_list,
+                                    (struct list_head *)msg_q);
+                       *phMsgQueue = msg_q;
+                       /* Signal that free frames are now available */
+                       if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+                               sync_set_event(hmsg_mgr->sync_event);
+
+               }
+               /* Exit critical section */
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+       } else {
+               delete_msg_queue(msg_q, 0);
+       }
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_msg_delete ========
+ *      Delete a msg_ctrl manager allocated in bridge_msg_create().
+ */
+void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
+{
+       if (hmsg_mgr)
+               delete_msg_mgr(hmsg_mgr);
+}
+
+/*
+ *  ======== bridge_msg_delete_queue ========
+ *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
+ */
+void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
+{
+       struct msg_mgr *hmsg_mgr;
+       u32 io_msg_pend;
+
+       if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
+               goto func_end;
+
+       hmsg_mgr = msg_queue_obj->hmsg_mgr;
+       msg_queue_obj->done = true;
+       /*  Unblock all threads blocked in MSG_Get() or MSG_Put(). */
+       io_msg_pend = msg_queue_obj->io_msg_pend;
+       while (io_msg_pend) {
+               /* Unblock thread */
+               sync_set_event(msg_queue_obj->sync_done);
+               /* Wait for acknowledgement */
+               sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
+               io_msg_pend = msg_queue_obj->io_msg_pend;
+       }
+       /* Remove message queue from hmsg_mgr->queue_list */
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+       lst_remove_elem(hmsg_mgr->queue_list,
+                       (struct list_head *)msg_queue_obj);
+       /* Free the message queue object */
+       delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
+       if (!hmsg_mgr->msg_free_list)
+               goto func_cont;
+       if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+               sync_reset_event(hmsg_mgr->sync_event);
+func_cont:
+       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+func_end:
+       return;
+}
+
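The done/sync_done/sync_done_ack interplay above is easiest to see with both sides reduced to their essentials. A sketch, using the sync_* primitives exactly as they appear in this file:

    /* Deleter (bridge_msg_delete_queue): flush out every blocked waiter. */
    msgq->done = true;
    while (msgq->io_msg_pend) {
            sync_set_event(msgq->sync_done);        /* wake one waiter */
            sync_wait_on_event(msgq->sync_done_ack, SYNC_INFINITE);
    }

    /* Waiter (bridge_msg_get/put), on waking with the lock held: */
    if (msgq->done) {
            msgq->io_msg_pend--;
            sync_set_event(msgq->sync_done_ack);    /* won't touch msgq again */
            return -EPERM;
    }
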
+/*
+ *  ======== bridge_msg_get ========
+ *      Get a message from a msg_ctrl queue.
+ */
+int bridge_msg_get(struct msg_queue *msg_queue_obj,
+                         struct dsp_msg *pmsg, u32 utimeout)
+{
+       struct msg_frame *msg_frame_obj;
+       struct msg_mgr *hmsg_mgr;
+       bool got_msg = false;
+       struct sync_object *syncs[2];
+       u32 index;
+       int status = 0;
+
+       if (!msg_queue_obj || pmsg == NULL) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+
+       hmsg_mgr = msg_queue_obj->hmsg_mgr;
+       if (!msg_queue_obj->msg_used_list) {
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       /* Enter critical section */
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+       /* If a message is already there, get it */
+       if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
+               msg_frame_obj = (struct msg_frame *)
+                   lst_get_head(msg_queue_obj->msg_used_list);
+               if (msg_frame_obj != NULL) {
+                       *pmsg = msg_frame_obj->msg_data.msg;
+                       lst_put_tail(msg_queue_obj->msg_free_list,
+                                    (struct list_head *)msg_frame_obj);
+                       if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+                               sync_reset_event(msg_queue_obj->sync_event);
+
+                       got_msg = true;
+               }
+       } else {
+               if (msg_queue_obj->done)
+                       status = -EPERM;
+               else
+                       msg_queue_obj->io_msg_pend++;
+
+       }
+       /* Exit critical section */
+       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+       if (DSP_SUCCEEDED(status) && !got_msg) {
+               /*  Wait until a message is available, timeout, or done. We
+                *  don't have to schedule the DPC, since the DSP will send
+                *  messages when they are available. */
+               syncs[0] = msg_queue_obj->sync_event;
+               syncs[1] = msg_queue_obj->sync_done;
+               status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+                                                     &index);
+               /* Enter critical section */
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+               if (msg_queue_obj->done) {
+                       msg_queue_obj->io_msg_pend--;
+                       /* Exit critical section */
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+                       /*  Signal that we're not going to access msg_queue_obj
+                        *  anymore, so it can be deleted. */
+                       (void)sync_set_event(msg_queue_obj->sync_done_ack);
+                       status = -EPERM;
+               } else {
+                       if (DSP_SUCCEEDED(status)) {
+                               DBC_ASSERT(!LST_IS_EMPTY
+                                          (msg_queue_obj->msg_used_list));
+                               /* Get msg from used list */
+                               msg_frame_obj = (struct msg_frame *)
+                                   lst_get_head(msg_queue_obj->msg_used_list);
+                               /* Copy message into pmsg and put frame on the
+                                * free list */
+                               if (msg_frame_obj != NULL) {
+                                       *pmsg = msg_frame_obj->msg_data.msg;
+                                       lst_put_tail
+                                           (msg_queue_obj->msg_free_list,
+                                            (struct list_head *)
+                                            msg_frame_obj);
+                               }
+                       }
+                       msg_queue_obj->io_msg_pend--;
+                       /* Set the event if there are still queued messages */
+                       if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
+                               sync_set_event(msg_queue_obj->sync_event);
+
+                       /* Exit critical section */
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+               }
+       }
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_msg_put ========
+ *      Put a message onto a msg_ctrl queue.
+ */
+int bridge_msg_put(struct msg_queue *msg_queue_obj,
+                         IN CONST struct dsp_msg *pmsg, u32 utimeout)
+{
+       struct msg_frame *msg_frame_obj;
+       struct msg_mgr *hmsg_mgr;
+       bool put_msg = false;
+       struct sync_object *syncs[2];
+       u32 index;
+       int status = 0;
+
+       if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+       hmsg_mgr = msg_queue_obj->hmsg_mgr;
+       if (!hmsg_mgr->msg_free_list) {
+               status = -EFAULT;
+               goto func_end;
+       }
+
+       spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+
+       /* If a message frame is available, use it */
+       if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+               msg_frame_obj =
+                   (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
+               if (msg_frame_obj != NULL) {
+                       msg_frame_obj->msg_data.msg = *pmsg;
+                       msg_frame_obj->msg_data.msgq_id =
+                           msg_queue_obj->msgq_id;
+                       lst_put_tail(hmsg_mgr->msg_used_list,
+                                    (struct list_head *)msg_frame_obj);
+                       hmsg_mgr->msgs_pending++;
+                       put_msg = true;
+               }
+               if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+                       sync_reset_event(hmsg_mgr->sync_event);
+
+               /* Release critical section before scheduling DPC */
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+               /* Schedule a DPC, to do the actual data transfer: */
+               iosm_schedule(hmsg_mgr->hio_mgr);
+       } else {
+               if (msg_queue_obj->done)
+                       status = -EPERM;
+               else
+                       msg_queue_obj->io_msg_pend++;
+
+               spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+       }
+       if (DSP_SUCCEEDED(status) && !put_msg) {
+               /* Wait until a free message frame is available, timeout,
+                * or done */
+               syncs[0] = hmsg_mgr->sync_event;
+               syncs[1] = msg_queue_obj->sync_done;
+               status = sync_wait_on_multiple_events(syncs, 2, utimeout,
+                                                     &index);
+               if (DSP_FAILED(status))
+                       goto func_end;
+               /* Enter critical section */
+               spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+               if (msg_queue_obj->done) {
+                       msg_queue_obj->io_msg_pend--;
+                       /* Exit critical section */
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+                       /*  Signal that we're not going to access msg_queue_obj
+                        *  anymore, so it can be deleted. */
+                       (void)sync_set_event(msg_queue_obj->sync_done_ack);
+                       status = -EPERM;
+               } else {
+                       if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+                               status = -EFAULT;
+                               goto func_cont;
+                       }
+                       /* Get msg from free list */
+                       msg_frame_obj = (struct msg_frame *)
+                           lst_get_head(hmsg_mgr->msg_free_list);
+                       /*
+                        * Copy the message into the frame and put the
+                        * frame on the used list.
+                        */
+                       if (msg_frame_obj) {
+                               msg_frame_obj->msg_data.msg = *pmsg;
+                               msg_frame_obj->msg_data.msgq_id =
+                                   msg_queue_obj->msgq_id;
+                               lst_put_tail(hmsg_mgr->msg_used_list,
+                                            (struct list_head *)msg_frame_obj);
+                               hmsg_mgr->msgs_pending++;
+                               /*
+                                * Schedule a DPC, to do the actual
+                                * data transfer.
+                                */
+                               iosm_schedule(hmsg_mgr->hio_mgr);
+                       }
+
+                       msg_queue_obj->io_msg_pend--;
+                       /* Set event if there are still frames available */
+                       if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
+                               sync_set_event(hmsg_mgr->sync_event);
+func_cont:
+                       /* Exit critical section */
+                       spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+               }
+       }
+func_end:
+       return status;
+}
+
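Taken together, a client of this module drives it roughly as below. This is a usage sketch only: hdev, node_exit_cb, hnode and node_env are placeholders for values the node manager supplies, and all error checking is elided:

    struct msg_mgr *mgr;
    struct msg_queue *q;
    struct dsp_msg msg = { 0 };

    bridge_msg_create(&mgr, hdev, node_exit_cb);    /* one manager per device */
    bridge_msg_create_queue(mgr, &q, 0, 16, hnode); /* one queue per node */
    bridge_msg_set_queue_id(q, node_env);           /* once the env is known */

    bridge_msg_put(q, &msg, 100);                   /* to the DSP, 100 ms timeout */
    bridge_msg_get(q, &msg, 100);                   /* from the DSP */

    bridge_msg_delete_queue(q);
    bridge_msg_delete(mgr);
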
+/*
+ *  ======== bridge_msg_register_notify ========
+ */
+int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
+                                  u32 event_mask, u32 notify_type,
+                                  struct dsp_notification *hnotification)
+{
+       int status = 0;
+
+       if (!msg_queue_obj || !hnotification) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+
+       if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
+               status = -EPERM;
+               goto func_end;
+       }
+
+       if (notify_type != DSP_SIGNALEVENT) {
+               status = -EBADR;
+               goto func_end;
+       }
+
+       if (event_mask)
+               status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
+                                               event_mask, notify_type);
+       else
+               status = ntfy_unregister(msg_queue_obj->ntfy_obj,
+                                                       hnotification);
+
+       if (status == -EINVAL) {
+               /*  Not registered. Ok, since we couldn't have known. Node
+                *  notifications are split between node state change handled
+                *  by NODE, and message ready handled by msg_ctrl. */
+               status = 0;
+       }
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_msg_set_queue_id ========
+ */
+void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
+{
+       /*
+        *  A message queue must be created when a node is allocated,
+        *  so that node_register_notify() can be called before the node
+        *  is created. Since we don't know the node environment until the
+        *  node is created, we need this function to set msg_queue_obj->msgq_id
+        *  to the node environment, after the node is created.
+        */
+       if (msg_queue_obj)
+               msg_queue_obj->msgq_id = msgq_id;
+}
+
+/*
+ *  ======== add_new_msg ========
+ *      Must be called in message manager critical section.
+ */
+static int add_new_msg(struct lst_list *msgList)
+{
+       struct msg_frame *pmsg;
+       int status = 0;
+
+       pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
+       if (pmsg != NULL) {
+               lst_init_elem((struct list_head *)pmsg);
+               lst_put_tail(msgList, (struct list_head *)pmsg);
+       } else {
+               status = -ENOMEM;
+       }
+
+       return status;
+}
+
+/*
+ *  ======== delete_msg_mgr ========
+ */
+static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
+{
+       if (!hmsg_mgr)
+               goto func_end;
+
+       if (hmsg_mgr->queue_list) {
+               if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
+                       kfree(hmsg_mgr->queue_list);
+                       hmsg_mgr->queue_list = NULL;
+               }
+       }
+
+       if (hmsg_mgr->msg_free_list) {
+               free_msg_list(hmsg_mgr->msg_free_list);
+               hmsg_mgr->msg_free_list = NULL;
+       }
+
+       if (hmsg_mgr->msg_used_list) {
+               free_msg_list(hmsg_mgr->msg_used_list);
+               hmsg_mgr->msg_used_list = NULL;
+       }
+
+       kfree(hmsg_mgr->sync_event);
+
+       kfree(hmsg_mgr);
+func_end:
+       return;
+}
+
+/*
+ *  ======== delete_msg_queue ========
+ */
+static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 uNumToDSP)
+{
+       struct msg_mgr *hmsg_mgr;
+       struct msg_frame *pmsg;
+       u32 i;
+
+       if (!msg_queue_obj ||
+           !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
+               goto func_end;
+
+       hmsg_mgr = msg_queue_obj->hmsg_mgr;
+
+       /* Pull off uNumToDSP message frames from Msg manager and free */
+       for (i = 0; i < uNumToDSP; i++) {
+
+               if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
+                       pmsg = (struct msg_frame *)
+                           lst_get_head(hmsg_mgr->msg_free_list);
+                       kfree(pmsg);
+               } else {
+                       /* Cannot free all of the message frames */
+                       break;
+               }
+       }
+
+       if (msg_queue_obj->msg_free_list) {
+               free_msg_list(msg_queue_obj->msg_free_list);
+               msg_queue_obj->msg_free_list = NULL;
+       }
+
+       if (msg_queue_obj->msg_used_list) {
+               free_msg_list(msg_queue_obj->msg_used_list);
+               msg_queue_obj->msg_used_list = NULL;
+       }
+
+       if (msg_queue_obj->ntfy_obj) {
+               ntfy_delete(msg_queue_obj->ntfy_obj);
+               kfree(msg_queue_obj->ntfy_obj);
+       }
+
+       kfree(msg_queue_obj->sync_event);
+       kfree(msg_queue_obj->sync_done);
+       kfree(msg_queue_obj->sync_done_ack);
+
+       kfree(msg_queue_obj);
+func_end:
+       return;
+}
+
+/*
+ *  ======== free_msg_list ========
+ */
+static void free_msg_list(struct lst_list *msgList)
+{
+       struct msg_frame *pmsg;
+
+       if (!msgList)
+               goto func_end;
+
+       while ((pmsg = (struct msg_frame *)lst_get_head(msgList)) != NULL)
+               kfree(pmsg);
+
+       DBC_ASSERT(LST_IS_EMPTY(msgList));
+
+       kfree(msgList);
+func_end:
+       return;
+}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
new file mode 100644 (file)
index 0000000..ee9205b
--- /dev/null
@@ -0,0 +1,1887 @@
+/*
+ * tiomap3430.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Processor Manager Driver for TI OMAP3430 EVM.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <plat/control.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/sync.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/*  ----------------------------------- Link Driver */
+#include <dspbridge/dspdefs.h>
+#include <dspbridge/dspchnl.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dspio.h>
+#include <dspbridge/dspmsg.h>
+#include <dspbridge/pwr.h>
+#include <dspbridge/io_sm.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
+#include <dspbridge/wdt.h>
+
+/*  ----------------------------------- Local */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+/* Offset in shared mem to write to in order to synchronize start with DSP */
+#define SHMSYNCOFFSET 4                /* GPP byte offset */
+
+#define BUFFERSIZE 1024
+
+#define TIHELEN_ACKTIMEOUT  10000
+
+#define MMU_SECTION_ADDR_MASK    0xFFF00000
+#define MMU_SSECTION_ADDR_MASK   0xFF000000
+#define MMU_LARGE_PAGE_MASK      0xFFFF0000
+#define MMU_SMALL_PAGE_MASK      0xFFFFF000
+#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
+#define PAGES_II_LVL_TABLE   512
+#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
+
+#define MMU_GFLUSH 0x60
+
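SHMSYNCOFFSET and TIHELEN_ACKTIMEOUT together implement the boot handshake: bridge_brd_start() writes a signature word at shm base + SHMSYNCOFFSET before releasing the DSP, and the DSP's startup code clears it. wait_for_start(), declared below, is assumed to poll along these lines (a sketch, not the definitive body):

    bool wait_for_start(struct bridge_dev_context *dev_context,
                        u32 dw_sync_addr)
    {
            u16 timeout = TIHELEN_ACKTIMEOUT;

            /* Spin until the DSP clears the sync word written at boot. */
            while (__raw_readw(dw_sync_addr) && --timeout)
                    udelay(10);

            return timeout != 0;    /* false: the DSP never came up */
    }
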
+/* Forward Declarations: */
+static int bridge_brd_monitor(struct bridge_dev_context *dev_context);
+static int bridge_brd_read(struct bridge_dev_context *dev_context,
+                                 OUT u8 *pbHostBuf,
+                                 u32 dwDSPAddr, u32 ul_num_bytes,
+                                 u32 ulMemType);
+static int bridge_brd_start(struct bridge_dev_context *dev_context,
+                                  u32 dwDSPAddr);
+static int bridge_brd_status(struct bridge_dev_context *dev_context,
+                                   int *pdwState);
+static int bridge_brd_stop(struct bridge_dev_context *dev_context);
+static int bridge_brd_write(struct bridge_dev_context *dev_context,
+                                  IN u8 *pbHostBuf,
+                                  u32 dwDSPAddr, u32 ul_num_bytes,
+                                  u32 ulMemType);
+static int bridge_brd_set_state(struct bridge_dev_context *hDevContext,
+                                   u32 ulBrdState);
+static int bridge_brd_mem_copy(struct bridge_dev_context *hDevContext,
+                                  u32 ulDspDestAddr, u32 ulDspSrcAddr,
+                                  u32 ul_num_bytes, u32 ulMemType);
+static int bridge_brd_mem_write(struct bridge_dev_context *dev_context,
+                                   IN u8 *pbHostBuf, u32 dwDSPAddr,
+                                   u32 ul_num_bytes, u32 ulMemType);
+static int bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
+                                 u32 ul_mpu_addr, u32 ulVirtAddr,
+                                 u32 ul_num_bytes, u32 ul_map_attr,
+                                 struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *hDevContext,
+                                    u32 ulVirtAddr, u32 ul_num_bytes);
+static int bridge_dev_create(OUT struct bridge_dev_context
+                                       **ppDevContext,
+                                       struct dev_object *hdev_obj,
+                                       IN struct cfg_hostres *pConfig);
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+                                 u32 dw_cmd, IN OUT void *pargs);
+static int bridge_dev_destroy(struct bridge_dev_context *dev_context);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *hDevContext, u32 pa,
+                            u32 va, u32 size,
+                            struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+                         u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *hDevContext,
+                                 u32 ul_mpu_addr, u32 ulVirtAddr,
+                                 u32 ul_num_bytes,
+                                 struct hw_mmu_map_attrs_t *hw_attrs);
+
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
+
+/*  ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+       u32 num_entries;        /* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+       spinlock_t pg_lock;     /* Critical section object handle */
+
+       u32 l1_base_pa;         /* Physical address of the L1 PT */
+       u32 l1_base_va;         /* Virtual  address of the L1 PT */
+       u32 l1_size;            /* Size of the L1 PT */
+       u32 l1_tbl_alloc_pa;
+       /* Physical address of Allocated mem for L1 table. May not be aligned */
+       u32 l1_tbl_alloc_va;
+       /* Virtual address of Allocated mem for L1 table. May not be aligned */
+       u32 l1_tbl_alloc_sz;
+       /* Size of consistent memory allocated for L1 table.
+        * May not be aligned */
+
+       u32 l2_base_pa;         /* Physical address of the L2 PT */
+       u32 l2_base_va;         /* Virtual  address of the L2 PT */
+       u32 l2_size;            /* Size of the L2 PT */
+       u32 l2_tbl_alloc_pa;
+       /* Physical address of Allocated mem for L2 table. May not be aligned */
+       u32 l2_tbl_alloc_va;
+       /* Virtual address of Allocated mem for L2 table. May not be aligned */
+       u32 l2_tbl_alloc_sz;
+       /* Size of consistent memory allocated for L2 table.
+        * May not be aligned */
+
+       u32 l2_num_pages;       /* Number of allocated L2 PT */
+       /* Array [l2_num_pages] of L2 PT info structs */
+       struct page_info *pg_info;
+};
+
+/*
+ *  This Bridge driver's function interface table.
+ */
+static struct bridge_drv_interface drv_interface_fxns = {
+       /* Bridge API ver. for which this bridge driver is built. */
+       BRD_API_MAJOR_VERSION,
+       BRD_API_MINOR_VERSION,
+       bridge_dev_create,
+       bridge_dev_destroy,
+       bridge_dev_ctrl,
+       bridge_brd_monitor,
+       bridge_brd_start,
+       bridge_brd_stop,
+       bridge_brd_status,
+       bridge_brd_read,
+       bridge_brd_write,
+       bridge_brd_set_state,
+       bridge_brd_mem_copy,
+       bridge_brd_mem_write,
+       bridge_brd_mem_map,
+       bridge_brd_mem_un_map,
+       /* The following CHNL functions are provided by chnl_io.lib: */
+       bridge_chnl_create,
+       bridge_chnl_destroy,
+       bridge_chnl_open,
+       bridge_chnl_close,
+       bridge_chnl_add_io_req,
+       bridge_chnl_get_ioc,
+       bridge_chnl_cancel_io,
+       bridge_chnl_flush_io,
+       bridge_chnl_get_info,
+       bridge_chnl_get_mgr_info,
+       bridge_chnl_idle,
+       bridge_chnl_register_notify,
+       /* The following DEH functions are provided by tihelen_ue_deh.c */
+       bridge_deh_create,
+       bridge_deh_destroy,
+       bridge_deh_notify,
+       bridge_deh_register_notify,
+       bridge_deh_get_info,
+       /* The following IO functions are provided by chnl_io.lib: */
+       bridge_io_create,
+       bridge_io_destroy,
+       bridge_io_on_loaded,
+       bridge_io_get_proc_load,
+       /* The following msg_ctrl functions are provided by chnl_io.lib: */
+       bridge_msg_create,
+       bridge_msg_create_queue,
+       bridge_msg_delete,
+       bridge_msg_delete_queue,
+       bridge_msg_get,
+       bridge_msg_put,
+       bridge_msg_register_notify,
+       bridge_msg_set_queue_id,
+};
+
+static inline void tlb_flush_all(const void __iomem *base)
+{
+       __raw_writeb(__raw_readb(base + MMU_GFLUSH) | 1, base + MMU_GFLUSH);
+}
+
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
+       if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+           dev_context->dw_brd_state == BRD_HIBERNATION)
+               wake_dsp(dev_context, NULL);
+
+       tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+       pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+       pr_emerg("Bad page state in process '%s'\n"
+                "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+                "Backtrace:\n",
+                current->comm, pg, (int)(2 * sizeof(unsigned long)),
+                (unsigned long)pg->flags, pg->mapping,
+                page_mapcount(pg), page_count(pg));
+       dump_stack();
+}
+
+/*
+ *  ======== bridge_drv_entry ========
+ *  purpose:
+ *      Bridge Driver entry point.
+ */
+void bridge_drv_entry(OUT struct bridge_drv_interface **ppDrvInterface,
+                  IN CONST char *driver_file_name)
+{
+
+       DBC_REQUIRE(driver_file_name != NULL);
+
+       io_sm_init();           /* Initialization of io_sm module */
+
+       if (strcmp(driver_file_name, "UMA") == 0)
+               *ppDrvInterface = &drv_interface_fxns;
+       else
+               dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
+
+}
+
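Callers consume the table handed back here through its pfn_* members, as the io_sm and crash-dump code elsewhere in this patch does with pfn_brd_read. An illustrative fragment (dev_ctxt and dsp_addr are placeholders):

    struct bridge_drv_interface *intf_fxns = NULL;
    u8 buf[BUFFERSIZE];

    bridge_drv_entry(&intf_fxns, "UMA");
    if (intf_fxns)
            /* Pull sizeof(buf) bytes out of DSP address space. */
            (*intf_fxns->pfn_brd_read)(dev_ctxt, buf, dsp_addr,
                                       sizeof(buf), 0);
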
+/*
+ *  ======== bridge_brd_monitor ========
+ *  purpose:
+ *      bridge_brd_monitor puts the DSP into a loadable state, i.e. the
+ *      application can load and start the device.
+ *
+ *  Preconditions:
+ *      Device in 'OFF' state.
+ */
+static int bridge_brd_monitor(struct bridge_dev_context *hDevContext)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       u32 temp;
+       struct dspbridge_platform_data *pdata =
+                                   omap_dspbridge_dev->dev.platform_data;
+
+       temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+                                       OMAP_POWERSTATEST_MASK;
+       if (!(temp & 0x02)) {
+               /* IVA2 is not in ON state */
+               /* Read and set PM_PWSTCTRL_IVA2  to ON */
+               (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+                       PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+               /* Set the SW supervised state transition */
+               (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
+                                       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+               /* Wait until the state has moved to ON */
+               while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+                                               OMAP_INTRANSITION_MASK)
+                       ;
+               /* Disable Automatic transition */
+               (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
+                                       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+       }
+       (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+                                       OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+       dsp_clk_enable(DSP_CLK_IVA2);
+
+       if (DSP_SUCCEEDED(status)) {
+               /* set the device state to IDLE */
+               dev_context->dw_brd_state = BRD_IDLE;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_read ========
+ *  purpose:
+ *      Reads data from DSP memory into a host buffer.
+ */
+static int bridge_brd_read(struct bridge_dev_context *hDevContext,
+                                 OUT u8 *pbHostBuf, u32 dwDSPAddr,
+                                 u32 ul_num_bytes, u32 ulMemType)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       u32 offset;
+       u32 dsp_base_addr = hDevContext->dw_dsp_base_addr;
+
+       if (dwDSPAddr < dev_context->dw_dsp_start_add) {
+               status = -EPERM;
+               return status;
+       }
+       /* Account here for the three bands of the DSP internal memory */
+       if ((dwDSPAddr - dev_context->dw_dsp_start_add) <
+           dev_context->dw_internal_size) {
+               offset = dwDSPAddr - dev_context->dw_dsp_start_add;
+       } else {
+               status = read_ext_dsp_data(dev_context, pbHostBuf, dwDSPAddr,
+                                          ul_num_bytes, ulMemType);
+               return status;
+       }
+       /* Copy the data from DSP memory */
+       memcpy(pbHostBuf, (void *)(dsp_base_addr + offset), ul_num_bytes);
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_set_state ========
+ *  purpose:
+ *      This routine updates the Board status.
+ */
+static int bridge_brd_set_state(struct bridge_dev_context *hDevContext,
+                                   u32 ulBrdState)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+
+       dev_context->dw_brd_state = ulBrdState;
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_start ========
+ *  purpose:
+ *      Initializes DSP MMU and Starts DSP.
+ *
+ *  Preconditions:
+ *  a) DSP domain is 'ACTIVE'.
+ *  b) DSP_RST1 is asserted.
+ *  c) DSP_RST2 is released.
+ */
+static int bridge_brd_start(struct bridge_dev_context *hDevContext,
+                                  u32 dwDSPAddr)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       u32 dw_sync_addr = 0;
+       u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
+       u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
+       u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
+       /* Offset of shm_base_virt from tlb_base_virt */
+       u32 ul_shm_offset_virt;
+       s32 entry_ndx;
+       s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
+       struct cfg_hostres *resources = NULL;
+       u32 temp;
+       u32 ul_dsp_clk_rate;
+       u32 ul_dsp_clk_addr;
+       u32 ul_bios_gp_timer;
+       u32 clk_cmd;
+       struct io_mgr *hio_mgr;
+       u32 ul_load_monitor_timer;
+       struct dspbridge_platform_data *pdata =
+                               omap_dspbridge_dev->dev.platform_data;
+
+       /* The device context contains all the mmu setup info from when the
+        * last dsp base image was loaded. The first entry is always
+        * SHMMEM base. */
+       /* Get SHM_BEG - convert to byte address */
+       (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
+                            &ul_shm_base_virt);
+       ul_shm_base_virt *= DSPWORDSIZE;
+       DBC_ASSERT(ul_shm_base_virt != 0);
+       /* DSP Virtual address */
+       ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
+       DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+       ul_shm_offset_virt =
+           ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
+       /* Kernel logical address */
+       ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
+
+       DBC_ASSERT(ul_shm_base != 0);
+       /* The 2nd word is used as the sync field */
+       dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
+       /* Write a signature into the shm base + offset; this will
+        * get cleared when the DSP program starts. */
+       if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
+               pr_err("%s: Illegal SM base\n", __func__);
+               status = -EPERM;
+       } else
+               *((volatile u32 *)dw_sync_addr) = 0xffffffff;
+
+       if (DSP_SUCCEEDED(status)) {
+               resources = dev_context->resources;
+               if (!resources)
+                       status = -EPERM;
+
+               /* Assert RST1, i.e. reset only the DSP megacell */
+               if (DSP_SUCCEEDED(status)) {
+                       (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
+                                       OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
+                                       OMAP2_RM_RSTCTRL);
+                       /* Mask address with 1K for compatibility */
+                       __raw_writel(dwDSPAddr & OMAP3_IVA2_BOOTADDR_MASK,
+                                       OMAP343X_CTRL_REGADDR(
+                                       OMAP343X_CONTROL_IVA2_BOOTADDR));
+                       /*
+                        * Set bootmode to self loop if dsp_debug flag is true
+                        */
+                       __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
+                                       OMAP343X_CTRL_REGADDR(
+                                       OMAP343X_CONTROL_IVA2_BOOTMOD));
+               }
+       }
+       if (DSP_SUCCEEDED(status)) {
+               /* Reset and Unreset the RST2, so that BOOTADDR is copied to
+                * IVA2 SYSC register */
+               (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+                       OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+               udelay(100);
+               (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+                                       OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+               udelay(100);
+
+               /* Disable the DSP MMU */
+               hw_mmu_disable(resources->dw_dmmu_base);
+               /* Disable TWL */
+               hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+               /* Only make a TLB entry if both addresses are non-zero */
+               for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+                    entry_ndx++) {
+                       struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+                       struct hw_mmu_map_attrs_t map_attrs = {
+                               .endianism = e->endianism,
+                               .element_size = e->elem_size,
+                               .mixed_size = e->mixed_mode,
+                       };
+
+                       if (!e->ul_gpp_pa || !e->ul_dsp_va)
+                               continue;
+
+                       dev_dbg(bridge,
+                                       "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+                                       itmp_entry_ndx,
+                                       e->ul_gpp_pa,
+                                       e->ul_dsp_va,
+                                       e->ul_size);
+
+                       hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+                                       e->ul_gpp_pa,
+                                       e->ul_dsp_va,
+                                       e->ul_size,
+                                       itmp_entry_ndx,
+                                       &map_attrs, 1, 1);
+
+                       itmp_entry_ndx++;
+               }
+       }
+
+       /* Lock the above TLB entries and get the BIOS and load monitor timer
+        * information */
+       if (DSP_SUCCEEDED(status)) {
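+               /* Entries [0..itmp_entry_ndx) stay locked in the DSP TLB:
+                * the victim (replacement) counter starts just past them,
+                * so the locked entries are never evicted. */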
+               hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+               hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+               hw_mmu_ttb_set(resources->dw_dmmu_base,
+                              dev_context->pt_attrs->l1_base_pa);
+               hw_mmu_twl_enable(resources->dw_dmmu_base);
+               /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
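+               /*
+                * MMU_SYSCONFIG is at offset 0x10. On OMAP3, bit 0 is
+                * AUTOIDLE and bits 4:3 the idle mode; the read-modify-write
+                * below sets AUTOIDLE and, with bit 3 left at 0, selects
+                * smart-idle.
+                */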
+               temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+               temp = (temp & 0xFFFFFFEF) | 0x11;
+               __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+               /* Let the DSP MMU run */
+               hw_mmu_enable(resources->dw_dmmu_base);
+
+               /* Enable the BIOS clock */
+               (void)dev_get_symbol(dev_context->hdev_obj,
+                                    BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
+               (void)dev_get_symbol(dev_context->hdev_obj,
+                                    BRIDGEINIT_LOADMON_GPTIMER,
+                                    &ul_load_monitor_timer);
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               if (ul_load_monitor_timer != 0xFFFF) {
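+                       /* Compose the mailbox message: clock command in
+                        * the upper bits, GP timer id in the lower bits */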
+                       clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+                           ul_load_monitor_timer;
+                       dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+               } else {
+                       dev_dbg(bridge, "Not able to get the symbol for Load "
+                               "Monitor Timer\n");
+               }
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               if (ul_bios_gp_timer != 0xFFFF) {
+                       clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
+                           ul_bios_gp_timer;
+                       dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
+               } else {
+                       dev_dbg(bridge,
+                               "Not able to get the symbol for BIOS Timer\n");
+               }
+       }
+
+       if (DSP_SUCCEEDED(status)) {
+               /* Set the DSP clock rate */
+               (void)dev_get_symbol(dev_context->hdev_obj,
+                                    "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
+               /* Set Autoidle mode for the IVA2 PLL */
+               (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+                               OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+               if (ul_dsp_clk_addr) {
+                       /* Get the clock rate */
+                       ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
+                       dev_dbg(bridge, "%s: DSP clock rate (kHz): 0x%x\n",
+                               __func__, ul_dsp_clk_rate);
+                       (void)bridge_brd_write(dev_context,
+                                              (u8 *) &ul_dsp_clk_rate,
+                                              ul_dsp_clk_addr, sizeof(u32), 0);
+               }
+               /*
+                * Enable Mailbox events and also drain any pending
+                * stale messages.
+                */
+               dev_context->mbox = omap_mbox_get("dsp");
+               if (IS_ERR(dev_context->mbox)) {
+                       dev_context->mbox = NULL;
+                       pr_err("%s: Failed to get dsp mailbox handle\n",
+                                                               __func__);
+                       status = -EPERM;
+               }
+
+       }
+       if (DSP_SUCCEEDED(status)) {
+               dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
+
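+               /*
+                * The raw offsets below are PER power-management registers,
+                * as named in the comment above each access: 0xA8 =
+                * PM_IVA2GRPSEL_PER, 0xA4 = PM_MPUGRPSEL_PER, 0x44 =
+                * CM_SLEEPDEP_PER. Bits 6 and 7 route the GPT5/GPT6 wakeup
+                * events to the IVA2 (rather than the MPU) group; bit 2 of
+                * CM_SLEEPDEP_PER adds a PER-to-IVA2 sleep dependency.
+                */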
+               /* PM_IVA2GRPSEL_PER = 0xC0; */
+               temp = (u32) *((reg_uword32 *)
+                               ((u32) (resources->dw_per_pm_base) + 0xA8));
+               temp = (temp & 0xFFFFFF30) | 0xC0;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8)) =
+                   (u32) temp;
+
+               /* PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
+               temp = (u32) *((reg_uword32 *)
+                               ((u32) (resources->dw_per_pm_base) + 0xA4));
+               temp = (temp & 0xFFFFFF3F);
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4)) =
+                   (u32) temp;
+               /* CM_SLEEPDEP_PER |= 0x04; */
+               temp = (u32) *((reg_uword32 *)
+                               ((u32) (resources->dw_per_base) + 0x44));
+               temp = (temp & 0xFFFFFFFB) | 0x04;
+               *((reg_uword32 *) ((u32) (resources->dw_per_base) + 0x44)) =
+                   (u32) temp;
+
+               /* CM_CLKSTCTRL_IVA2 = 0x00000003 - allow automatic transitions */
+               (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
+                                       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+
+               /* Let DSP go */
+               dev_dbg(bridge, "%s Unreset\n", __func__);
+               /* Enable DSP MMU Interrupts */
+               hw_mmu_event_enable(resources->dw_dmmu_base,
+                                   HW_MMU_ALL_INTERRUPTS);
+               /* release the RST1, DSP starts executing now .. */
+               (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
+                                       OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+               dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
+               dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dwDSPAddr);
+               if (dsp_debug)
+                       while (*((volatile u16 *)dw_sync_addr))
+                               ;
+
+               /* Wait for DSP to clear word in shared memory */
+               /* Read the Location */
+               if (!wait_for_start(dev_context, dw_sync_addr))
+                       status = -ETIMEDOUT;
+
+               /* Start wdt */
+               dsp_wdt_sm_set((void *)ul_shm_base);
+               dsp_wdt_enable(true);
+
+               status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+               if (hio_mgr) {
+                       io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
+                       /* Write the synchronization bit to indicate the
+                        * completion of OPP table update to DSP
+                        */
+                       *((volatile u32 *)dw_sync_addr) = 0xCAFECAFE;
+
+                       /* update board state */
+                       dev_context->dw_brd_state = BRD_RUNNING;
+                       /* (void)chnlsm_enable_interrupt(dev_context); */
+               } else {
+                       dev_context->dw_brd_state = BRD_UNKNOWN;
+               }
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_stop ========
+ *  purpose:
+ *      Puts DSP in self loop.
+ *
+ *  Preconditions :
+ *  a) None
+ */
+static int bridge_brd_stop(struct bridge_dev_context *hDevContext)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       struct pg_table_attrs *pt_attrs;
+       u32 dsp_pwr_state;
+       int clk_status;
+       struct dspbridge_platform_data *pdata =
+                               omap_dspbridge_dev->dev.platform_data;
+
+       if (dev_context->dw_brd_state == BRD_STOPPED)
+               return status;
+
+       /* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
+        * mode before turning off the clocks. This ensures that there are no
+        * pending L3 or other transactions from the IVA2. */
+       dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+                                       OMAP_POWERSTATEST_MASK;
+       if (dsp_pwr_state != PWRDM_POWER_OFF) {
+               sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
+               mdelay(10);
+
+               clk_status = dsp_clk_disable(DSP_CLK_IVA2);
+
+               /* IVA2 is not in OFF state */
+               /* Set PM_PWSTCTRL_IVA2  to OFF */
+               (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
+                       PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
+               /* Set the SW supervised state transition for Sleep */
+               (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
+                                       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+       } else {
+               clk_status = dsp_clk_disable(DSP_CLK_IVA2);
+       }
+       udelay(10);
+       /* Release the Ext Base virtual Address as the next DSP Program
+        * may have a different load address */
+       if (dev_context->dw_dsp_ext_base_addr)
+               dev_context->dw_dsp_ext_base_addr = 0;
+
+       dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
+
+       dsp_wdt_enable(false);
+
+       /* This is a good place to clear the MMU page tables as well */
+       if (dev_context->pt_attrs) {
+               pt_attrs = dev_context->pt_attrs;
+               memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+               memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+               memset((u8 *) pt_attrs->pg_info, 0x00,
+                      (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+       }
+       /* Disable the mailbox interrupts */
+       if (dev_context->mbox) {
+               omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
+               omap_mbox_put(dev_context->mbox);
+               dev_context->mbox = NULL;
+       }
+       /* Reset IVA2 clocks */
+       (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+                       OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_delete ========
+ *  purpose:
+ *      Puts DSP in Low power mode
+ *
+ *  Preconditions :
+ *  a) None
+ */
+static int bridge_brd_delete(struct bridge_dev_context *hDevContext)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       struct pg_table_attrs *pt_attrs;
+       int clk_status;
+       struct dspbridge_platform_data *pdata =
+                               omap_dspbridge_dev->dev.platform_data;
+
+       if (dev_context->dw_brd_state == BRD_STOPPED)
+               return status;
+
+       /* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
+        * mode before turning off the clocks. This ensures that there are no
+        * pending L3 or other transactions from the IVA2. */
+       status = sleep_dsp(dev_context, PWR_EMERGENCYDEEPSLEEP, NULL);
+       clk_status = dsp_clk_disable(DSP_CLK_IVA2);
+
+       /* Release the Ext Base virtual Address as the next DSP Program
+        * may have a different load address */
+       if (dev_context->dw_dsp_ext_base_addr)
+               dev_context->dw_dsp_ext_base_addr = 0;
+
+       dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
+
+       /* This is a good place to clear the MMU page tables as well */
+       if (dev_context->pt_attrs) {
+               pt_attrs = dev_context->pt_attrs;
+               memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+               memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+               memset((u8 *) pt_attrs->pg_info, 0x00,
+                      (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+       }
+       /* Disable the mailbox interrupts */
+       if (dev_context->mbox) {
+               omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
+               omap_mbox_put(dev_context->mbox);
+               dev_context->mbox = NULL;
+       }
+       /* Reset IVA2 clocks */
+       (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+                       OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_status ========
+ *      Returns the board status.
+ */
+static int bridge_brd_status(struct bridge_dev_context *hDevContext,
+                                   int *pdwState)
+{
+       struct bridge_dev_context *dev_context = hDevContext;
+       *pdwState = dev_context->dw_brd_state;
+       return 0;
+}
+
+/*
+ *  ======== bridge_brd_write ========
+ *      Copies the buffers to DSP internal or external memory.
+ */
+static int bridge_brd_write(struct bridge_dev_context *hDevContext,
+                                  IN u8 *pbHostBuf, u32 dwDSPAddr,
+                                  u32 ul_num_bytes, u32 ulMemType)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+
+       if (dwDSPAddr < dev_context->dw_dsp_start_add) {
+               status = -EPERM;
+               return status;
+       }
+       if ((dwDSPAddr - dev_context->dw_dsp_start_add) <
+           dev_context->dw_internal_size) {
+               status = write_dsp_data(hDevContext, pbHostBuf, dwDSPAddr,
+                                       ul_num_bytes, ulMemType);
+       } else {
+               status = write_ext_dsp_data(dev_context, pbHostBuf, dwDSPAddr,
+                                           ul_num_bytes, ulMemType, false);
+       }
+
+       return status;
+}
+
+/*
+ *  ======== bridge_dev_create ========
+ *      Creates a driver object and allocates the bridge device context.
+ */
+static int bridge_dev_create(OUT struct bridge_dev_context
+                                       **ppDevContext,
+                                       struct dev_object *hdev_obj,
+                                       IN struct cfg_hostres *pConfig)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = NULL;
+       s32 entry_ndx;
+       struct cfg_hostres *resources = pConfig;
+       struct pg_table_attrs *pt_attrs;
+       u32 pg_tbl_pa;
+       u32 pg_tbl_va;
+       u32 align_size;
+       struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+       /* Allocate and initialize a data structure to contain the bridge driver
+        *  state, which becomes the context for later calls into this driver */
+       dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
+       if (!dev_context) {
+               status = -ENOMEM;
+               goto func_end;
+       }
+
+       dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
+       dev_context->dw_self_loop = (u32) NULL;
+       dev_context->dsp_per_clks = 0;
+       dev_context->dw_internal_size = OMAP_DSP_SIZE;
+       /*  Clear dev context MMU table entries.
+        *  These get set on bridge_io_on_loaded() call after program loaded. */
+       for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
+               dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
+                   dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
+       }
+       dev_context->num_tlb_entries = 0;
+       dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
+                                                                (pConfig->
+                                                                 dw_mem_base
+                                                                 [3]),
+                                                                pConfig->
+                                                                dw_mem_length
+                                                                [3]);
+       if (!dev_context->dw_dsp_base_addr)
+               status = -EPERM;
+
+       pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+       if (pt_attrs != NULL) {
+               /* Assuming that we use only the DSP's memory map up to
+                * 0x4000:0000, we need only 1024 L1 entries, i.e. an L1
+                * size of 4K, since each L1 entry covers 1MB. */
+               pt_attrs->l1_size = 0x1000;
+               align_size = pt_attrs->l1_size;
+               /* Alignment sizes must be a power of 2; we want the
+                * allocation aligned on the L1 table size */
+               pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+                                                    align_size, &pg_tbl_pa);
+
+               /* Check if the PA is aligned for us */
+               if ((pg_tbl_pa) & (align_size - 1)) {
+                       /* PA not aligned to the page table size; allocate
+                        * twice the size and align within the allocation */
+                       mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+                                         pt_attrs->l1_size);
+                       /* retry with double the size, aligned on the L1
+                        * table size */
+                       pg_tbl_va =
+                           (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+                                                    align_size, &pg_tbl_pa);
+                       /* We should be able to get aligned table now */
+                       pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+                       pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+                       pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+                       /* Align the PA to the next 'align' boundary */
+                       pt_attrs->l1_base_pa =
+                           ((pg_tbl_pa) +
+                            (align_size - 1)) & (~(align_size - 1));
+                       pt_attrs->l1_base_va =
+                           pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+               } else {
+                       /* We got aligned PA, cool */
+                       pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+                       pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+                       pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+                       pt_attrs->l1_base_pa = pg_tbl_pa;
+                       pt_attrs->l1_base_va = pg_tbl_va;
+               }
+               if (pt_attrs->l1_base_va)
+                       memset((u8 *) pt_attrs->l1_base_va, 0x00,
+                              pt_attrs->l1_size);
+
+               /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
+                * L4 pages */
+               pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+               pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+                   pt_attrs->l2_num_pages;
+               align_size = 4; /* Make it u32 aligned */
+               pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+                                                    align_size, &pg_tbl_pa);
+               pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+               pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+               pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+               pt_attrs->l2_base_pa = pg_tbl_pa;
+               pt_attrs->l2_base_va = pg_tbl_va;
+
+               if (pt_attrs->l2_base_va)
+                       memset((u8 *) pt_attrs->l2_base_va, 0x00,
+                              pt_attrs->l2_size);
+
+               pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+                                       sizeof(struct page_info), GFP_KERNEL);
+               dev_dbg(bridge,
+                       "L1 pa %x, va %x, size %x\nL2 pa %x, va %x, "
+                       "size %x\n", pt_attrs->l1_base_pa,
+                       pt_attrs->l1_base_va, pt_attrs->l1_size,
+                       pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+                       pt_attrs->l2_size);
+               dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+                       pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+       }
+       if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+           (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+               dev_context->pt_attrs = pt_attrs;
+       else
+               status = -ENOMEM;
+
+       if (DSP_SUCCEEDED(status)) {
+               spin_lock_init(&pt_attrs->pg_lock);
+               dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+               /* Set the Clock Divisor for the DSP module */
+               udelay(5);
+               /* MMU address is obtained from the host
+                * resources struct */
+               dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+       }
+       if (DSP_SUCCEEDED(status)) {
+               dev_context->hdev_obj = hdev_obj;
+               dev_context->ul_int_mask = 0;
+               /* Store current board state. */
+               dev_context->dw_brd_state = BRD_STOPPED;
+               dev_context->resources = resources;
+               /* Return ptr to our device state to the DSP API for storage */
+               *ppDevContext = dev_context;
+       } else {
+               if (pt_attrs != NULL) {
+                       kfree(pt_attrs->pg_info);
+
+                       if (pt_attrs->l2_tbl_alloc_va) {
+                               mem_free_phys_mem((void *)
+                                                 pt_attrs->l2_tbl_alloc_va,
+                                                 pt_attrs->l2_tbl_alloc_pa,
+                                                 pt_attrs->l2_tbl_alloc_sz);
+                       }
+                       if (pt_attrs->l1_tbl_alloc_va) {
+                               mem_free_phys_mem((void *)
+                                                 pt_attrs->l1_tbl_alloc_va,
+                                                 pt_attrs->l1_tbl_alloc_pa,
+                                                 pt_attrs->l1_tbl_alloc_sz);
+                       }
+               }
+               kfree(pt_attrs);
+               kfree(dev_context);
+       }
+func_end:
+       return status;
+}
+
+/*
+ *  ======== bridge_dev_ctrl ========
+ *      Receives device specific commands.
+ */
+static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
+                                 u32 dw_cmd, IN OUT void *pargs)
+{
+       int status = 0;
+       struct bridge_ioctl_extproc *pa_ext_proc =
+                                       (struct bridge_ioctl_extproc *)pargs;
+       s32 ndx;
+
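+       /* The interpretation of pargs depends on dw_cmd; for
+        * BRDIOCTL_SETMMUCONFIG it points to an array of
+        * BRDIOCTL_NUMOFMMUTLB bridge_ioctl_extproc entries */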
+       switch (dw_cmd) {
+       case BRDIOCTL_CHNLREAD:
+               break;
+       case BRDIOCTL_CHNLWRITE:
+               break;
+       case BRDIOCTL_SETMMUCONFIG:
+               /* store away dsp-mmu setup values for later use */
+               for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
+                       dev_context->atlb_entry[ndx] = *pa_ext_proc;
+               break;
+       case BRDIOCTL_DEEPSLEEP:
+       case BRDIOCTL_EMERGENCYSLEEP:
+               /* Currently only DSP idle is supported; needs updating for
+                * later releases */
+               status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
+               break;
+       case BRDIOCTL_WAKEUP:
+               status = wake_dsp(dev_context, pargs);
+               break;
+       case BRDIOCTL_CLK_CTRL:
+               /* Looking for baseport fix for clocks */
+               status = dsp_peripheral_clk_ctrl(dev_context, pargs);
+               break;
+       case BRDIOCTL_PWR_HIBERNATE:
+               status = handle_hibernation_from_dsp(dev_context);
+               break;
+       case BRDIOCTL_PRESCALE_NOTIFY:
+               status = pre_scale_dsp(dev_context, pargs);
+               break;
+       case BRDIOCTL_POSTSCALE_NOTIFY:
+               status = post_scale_dsp(dev_context, pargs);
+               break;
+       case BRDIOCTL_CONSTRAINT_REQUEST:
+               status = handle_constraints_set(dev_context, pargs);
+               break;
+       default:
+               status = -EPERM;
+               break;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_dev_destroy ========
+ *      Destroys the driver object.
+ */
+static int bridge_dev_destroy(struct bridge_dev_context *hDevContext)
+{
+       struct pg_table_attrs *pt_attrs;
+       int status = 0;
+       struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
+           hDevContext;
+       struct cfg_hostres *host_res;
+       u32 shm_size;
+       struct drv_data *drv_datap = dev_get_drvdata(bridge);
+
+       /* This should never happen */
+       if (!hDevContext)
+               return -EFAULT;
+
+       /* first put the device to stop state */
+       bridge_brd_delete(dev_context);
+       if (dev_context->pt_attrs) {
+               pt_attrs = dev_context->pt_attrs;
+               kfree(pt_attrs->pg_info);
+
+               if (pt_attrs->l2_tbl_alloc_va) {
+                       mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+                                         pt_attrs->l2_tbl_alloc_pa,
+                                         pt_attrs->l2_tbl_alloc_sz);
+               }
+               if (pt_attrs->l1_tbl_alloc_va) {
+                       mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+                                         pt_attrs->l1_tbl_alloc_pa,
+                                         pt_attrs->l1_tbl_alloc_sz);
+               }
+               kfree(pt_attrs);
+
+       }
+
+       if (dev_context->resources) {
+               host_res = dev_context->resources;
+               shm_size = drv_datap->shm_size;
+               if (shm_size >= 0x10000) {
+                       if ((host_res->dw_mem_base[1]) &&
+                           (host_res->dw_mem_phys[1])) {
+                               mem_free_phys_mem((void *)
+                                                 host_res->dw_mem_base
+                                                 [1],
+                                                 host_res->dw_mem_phys
+                                                 [1], shm_size);
+                       }
+               } else {
+                       dev_dbg(bridge, "%s: Error getting shm size "
+                               "from registry: %x. Not calling "
+                               "mem_free_phys_mem\n", __func__,
+                               status);
+               }
+               host_res->dw_mem_base[1] = 0;
+               host_res->dw_mem_phys[1] = 0;
+
+               if (host_res->dw_mem_base[0])
+                       iounmap((void *)host_res->dw_mem_base[0]);
+               if (host_res->dw_mem_base[2])
+                       iounmap((void *)host_res->dw_mem_base[2]);
+               if (host_res->dw_mem_base[3])
+                       iounmap((void *)host_res->dw_mem_base[3]);
+               if (host_res->dw_mem_base[4])
+                       iounmap((void *)host_res->dw_mem_base[4]);
+               if (host_res->dw_dmmu_base)
+                       iounmap(host_res->dw_dmmu_base);
+               if (host_res->dw_per_base)
+                       iounmap(host_res->dw_per_base);
+               if (host_res->dw_per_pm_base)
+                       iounmap((void *)host_res->dw_per_pm_base);
+               if (host_res->dw_core_pm_base)
+                       iounmap((void *)host_res->dw_core_pm_base);
+               if (host_res->dw_sys_ctrl_base)
+                       iounmap(host_res->dw_sys_ctrl_base);
+
+               host_res->dw_mem_base[0] = (u32) NULL;
+               host_res->dw_mem_base[2] = (u32) NULL;
+               host_res->dw_mem_base[3] = (u32) NULL;
+               host_res->dw_mem_base[4] = (u32) NULL;
+               host_res->dw_dmmu_base = NULL;
+               host_res->dw_sys_ctrl_base = NULL;
+
+               kfree(host_res);
+       }
+
+       /* Free the driver's device context: */
+       kfree(drv_datap->base_img);
+       kfree(drv_datap);
+       dev_set_drvdata(bridge, NULL);
+       kfree((void *)hDevContext);
+       return status;
+}
+
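+/*
+ *  ======== bridge_brd_mem_copy ========
+ *      Copies ul_num_bytes from one DSP address to another through a
+ *      BUFFERSIZE-byte bounce buffer on the host.
+ */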
+static int bridge_brd_mem_copy(struct bridge_dev_context *hDevContext,
+                                  u32 ulDspDestAddr, u32 ulDspSrcAddr,
+                                  u32 ul_num_bytes, u32 ulMemType)
+{
+       int status = 0;
+       u32 src_addr = ulDspSrcAddr;
+       u32 dest_addr = ulDspDestAddr;
+       u32 copy_bytes = 0;
+       u32 total_bytes = ul_num_bytes;
+       u8 host_buf[BUFFERSIZE];
+       struct bridge_dev_context *dev_context = hDevContext;
+       while ((total_bytes > 0) && DSP_SUCCEEDED(status)) {
+               copy_bytes =
+                   total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
+               /* Read from External memory */
+               status = read_ext_dsp_data(hDevContext, host_buf, src_addr,
+                                          copy_bytes, ulMemType);
+               if (DSP_SUCCEEDED(status)) {
+                       if (dest_addr < (dev_context->dw_dsp_start_add +
+                                        dev_context->dw_internal_size)) {
+                               /* Write to Internal memory */
+                               status = write_dsp_data(hDevContext, host_buf,
+                                                       dest_addr, copy_bytes,
+                                                       ulMemType);
+                       } else {
+                               /* Write to External memory */
+                               status =
+                                   write_ext_dsp_data(hDevContext, host_buf,
+                                                      dest_addr, copy_bytes,
+                                                      ulMemType, false);
+                       }
+               }
+               total_bytes -= copy_bytes;
+               src_addr += copy_bytes;
+               dest_addr += copy_bytes;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_mem_write ========
+ *      Copies the buffers to DSP memory. Unlike bridge_brd_write, this does
+ *      not halt the DSP while writing.
+ */
+static int bridge_brd_mem_write(struct bridge_dev_context *hDevContext,
+                                   IN u8 *pbHostBuf, u32 dwDSPAddr,
+                                   u32 ul_num_bytes, u32 ulMemType)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       u32 ul_remain_bytes = 0;
+       u32 ul_bytes = 0;
+       ul_remain_bytes = ul_num_bytes;
+       while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
+               ul_bytes =
+                   ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
+               if (dwDSPAddr < (dev_context->dw_dsp_start_add +
+                                dev_context->dw_internal_size)) {
+                       status =
+                           write_dsp_data(hDevContext, pbHostBuf, dwDSPAddr,
+                                          ul_bytes, ulMemType);
+               } else {
+                       status = write_ext_dsp_data(hDevContext, pbHostBuf,
+                                                   dwDSPAddr, ul_bytes,
+                                                   ulMemType, true);
+               }
+               ul_remain_bytes -= ul_bytes;
+               dwDSPAddr += ul_bytes;
+               pbHostBuf = pbHostBuf + ul_bytes;
+       }
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_mem_map ========
+ *      This function maps an MPU buffer into the DSP address space. It
+ *  performs linear-to-physical address translation if required, translating
+ *  each page separately since linear addresses can be physically
+ *  non-contiguous. All address and size arguments are assumed to be page
+ *  aligned (enforced in proc.c).
+ *
+ *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
+ */
+static int bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
+                                 u32 ul_mpu_addr, u32 ulVirtAddr,
+                                 u32 ul_num_bytes, u32 ul_map_attr,
+                                 struct page **mapped_pages)
+{
+       u32 attrs;
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       struct hw_mmu_map_attrs_t hw_attrs;
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       u32 write = 0;
+       u32 num_usr_pgs = 0;
+       struct page *mapped_page, *pg;
+       s32 pg_num;
+       u32 va = ulVirtAddr;
+       struct task_struct *curr_task = current;
+       u32 pg_i = 0;
+       u32 mpu_addr, pa;
+
+       dev_dbg(bridge,
+               "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+               __func__, hDevContext, ul_mpu_addr, ulVirtAddr, ul_num_bytes,
+               ul_map_attr);
+       if (ul_num_bytes == 0)
+               return -EINVAL;
+
+       if (ul_map_attr & DSP_MAP_DIR_MASK) {
+               attrs = ul_map_attr;
+       } else {
+               /* Assign default attributes */
+               attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+       }
+       /* Take mapping properties */
+       if (attrs & DSP_MAPBIGENDIAN)
+               hw_attrs.endianism = HW_BIG_ENDIAN;
+       else
+               hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+       hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+           ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+       /* Ignore element_size if mixed_size is enabled */
+       if (hw_attrs.mixed_size == 0) {
+               if (attrs & DSP_MAPELEMSIZE8) {
+                       /* Size is 8 bit */
+                       hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+               } else if (attrs & DSP_MAPELEMSIZE16) {
+                       /* Size is 16 bit */
+                       hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+               } else if (attrs & DSP_MAPELEMSIZE32) {
+                       /* Size is 32 bit */
+                       hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+               } else if (attrs & DSP_MAPELEMSIZE64) {
+                       /* Size is 64 bit */
+                       hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+               } else {
+                       /*
+                        * Mixedsize isn't enabled, so size can't be
+                        * zero here
+                        */
+                       return -EINVAL;
+               }
+       }
+       if (attrs & DSP_MAPDONOTLOCK)
+               hw_attrs.donotlockmpupage = 1;
+       else
+               hw_attrs.donotlockmpupage = 0;
+
+       if (attrs & DSP_MAPVMALLOCADDR) {
+               return mem_map_vmalloc(hDevContext, ul_mpu_addr, ulVirtAddr,
+                                      ul_num_bytes, &hw_attrs);
+       }
+       /*
+        * Do OS-specific user-va to pa translation.
+        * Combine physically contiguous regions to reduce TLBs.
+        * Pass the translated pa to pte_update.
+        */
+       if ((attrs & DSP_MAPPHYSICALADDR)) {
+               status = pte_update(dev_context, ul_mpu_addr, ulVirtAddr,
+                                   ul_num_bytes, &hw_attrs);
+               goto func_cont;
+       }
+
+       /*
+        * Important Note: ul_mpu_addr is mapped from user application process
+        * to current process - it must lie completely within the current
+        * virtual memory address space in order to be of use to us here!
+        */
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, ul_mpu_addr);
+       if (vma)
+               dev_dbg(bridge,
+                       "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+                       "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+                       ul_num_bytes, vma->vm_start, vma->vm_end,
+                       vma->vm_flags);
+
+       /*
+        * It is observed that under some circumstances the user buffer is
+        * spread across several VMAs, so loop through them and check that
+        * the entire user buffer is covered
+        */
+       while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+               /* jump to the next VMA region */
+               vma = find_vma(mm, vma->vm_end + 1);
+               dev_dbg(bridge,
+                       "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+                       "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+                       ul_num_bytes, vma->vm_start, vma->vm_end,
+                       vma->vm_flags);
+       }
+       if (!vma) {
+               pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+                      __func__, ul_mpu_addr, ul_num_bytes);
+               status = -EINVAL;
+               up_read(&mm->mmap_sem);
+               goto func_cont;
+       }
+
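+       /*
+        * VM_IO mappings (e.g. device memory) cannot be pinned with
+        * get_user_pages(), so walk the page tables directly for them.
+        */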
+       if (vma->vm_flags & VM_IO) {
+               num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+               mpu_addr = ul_mpu_addr;
+
+               /* Get the physical addresses for user buffer */
+               for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+                       pa = user_va2_pa(mm, mpu_addr);
+                       if (!pa) {
+                               status = -EPERM;
+                               pr_err("DSPBRIDGE: VM_IO mapping physical "
+                                      "address is invalid\n");
+                               break;
+                       }
+                       if (pfn_valid(__phys_to_pfn(pa))) {
+                               pg = PHYS_TO_PAGE(pa);
+                               get_page(pg);
+                               if (page_count(pg) < 1) {
+                                       pr_err("Bad page in VM_IO buffer\n");
+                                       bad_page_dump(pa, pg);
+                               }
+                       }
+                       status = pte_set(dev_context->pt_attrs, pa,
+                                        va, HW_PAGE_SIZE4KB, &hw_attrs);
+                       if (DSP_FAILED(status))
+                               break;
+
+                       va += HW_PAGE_SIZE4KB;
+                       mpu_addr += HW_PAGE_SIZE4KB;
+                       pa += HW_PAGE_SIZE4KB;
+               }
+       } else {
+               num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+               if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+                       write = 1;
+
+               for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+                       pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+                                               write, 1, &mapped_page, NULL);
+                       if (pg_num > 0) {
+                               if (page_count(mapped_page) < 1) {
+                                       pr_err("Bad page count after doing "
+                                              "get_user_pages on "
+                                              "user buffer\n");
+                                       bad_page_dump(page_to_phys(mapped_page),
+                                                     mapped_page);
+                               }
+                               status = pte_set(dev_context->pt_attrs,
+                                                page_to_phys(mapped_page), va,
+                                                HW_PAGE_SIZE4KB, &hw_attrs);
+                               if (DSP_FAILED(status))
+                                       break;
+
+                               if (mapped_pages)
+                                       mapped_pages[pg_i] = mapped_page;
+
+                               va += HW_PAGE_SIZE4KB;
+                               ul_mpu_addr += HW_PAGE_SIZE4KB;
+                       } else {
+                               pr_err("DSPBRIDGE: get_user_pages FAILED, "
+                                      "MPU addr = 0x%x, "
+                                      "vma->vm_flags = 0x%lx, "
+                                      "get_user_pages err "
+                                      "value = %d, buffer "
+                                      "size = 0x%x\n", ul_mpu_addr,
+                                      vma->vm_flags, pg_num, ul_num_bytes);
+                               status = -EPERM;
+                               break;
+                       }
+               }
+       }
+       up_read(&mm->mmap_sem);
+func_cont:
+       if (DSP_SUCCEEDED(status)) {
+               status = 0;
+       } else {
+               /*
+                * Roll back the pages mapped so far in case the mapping
+                * failed midway
+                */
+               if (pg_i) {
+                       bridge_brd_mem_un_map(dev_context, ulVirtAddr,
+                                          (pg_i * PG_SIZE4K));
+               }
+               status = -EPERM;
+       }
+       /*
+        * In any case, flush the TLB. This is done here instead of in
+        * pte_update to avoid unnecessary repetition while mapping
+        * non-contiguous physical regions of a virtual region
+        */
+       flush_all(dev_context);
+       dev_dbg(bridge, "%s status %x\n", __func__, status);
+       return status;
+}
+
+/*
+ *  ======== bridge_brd_mem_un_map ========
+ *      Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ *      PTEs of a mapped memory block are contiguous in any page table
+ *      So, instead of looking up the PTE address for every 4K block,
+ *      we clear consecutive PTEs until we unmap all the bytes
+ */
+static int bridge_brd_mem_un_map(struct bridge_dev_context *hDevContext,
+                                    u32 ulVirtAddr, u32 ul_num_bytes)
+{
+       u32 l1_base_va;
+       u32 l2_base_va;
+       u32 l2_base_pa;
+       u32 l2_page_num;
+       u32 pte_val;
+       u32 pte_size;
+       u32 pte_count;
+       u32 pte_addr_l1;
+       u32 pte_addr_l2 = 0;
+       u32 rem_bytes;
+       u32 rem_bytes_l2;
+       u32 va_curr;
+       struct page *pg = NULL;
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       struct pg_table_attrs *pt = dev_context->pt_attrs;
+       u32 temp;
+       u32 paddr;
+       u32 numof4k_pages = 0;
+
+       va_curr = ulVirtAddr;
+       rem_bytes = ul_num_bytes;
+       rem_bytes_l2 = 0;
+       l1_base_va = pt->l1_base_va;
+       pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+       dev_dbg(bridge, "%s hDevContext %p, va %x, NumBytes %x l1_base_va %x, "
+               "pte_addr_l1 %x\n", __func__, hDevContext, ulVirtAddr,
+               ul_num_bytes, l1_base_va, pte_addr_l1);
+
+       while (rem_bytes && (DSP_SUCCEEDED(status))) {
+               u32 va_curr_orig = va_curr;
+               /* Find whether the L1 PTE points to a valid L2 PT */
+               pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+               pte_val = *(u32 *) pte_addr_l1;
+               pte_size = hw_mmu_pte_size_l1(pte_val);
+
+               if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+                       goto skip_coarse_page;
+
+               /*
+                * Get the L2 PA from the L1 PTE, and find
+                * corresponding L2 VA
+                */
+               l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+               l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+               l2_page_num =
+                   (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+               /*
+                * Find the L2 PTE address from which we will start
+                * clearing, the number of PTEs to be cleared on this
+                * page, and the size of VA space that needs to be
+                * cleared on this L2 page
+                */
+               pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+               pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+               pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+               if (rem_bytes < (pte_count * PG_SIZE4K))
+                       pte_count = rem_bytes / PG_SIZE4K;
+               rem_bytes_l2 = pte_count * PG_SIZE4K;
+
+               /*
+                * Unmap the VA space on this L2 PT. A quicker way
+                * would be to clear pte_count entries starting from
+                * pte_addr_l2. However, below code checks that we don't
+                * clear invalid entries or less than 64KB for a 64KB
+                * entry. Similar checking is done for L1 PTEs too
+                * below
+                */
+               while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) {
+                       pte_val = *(u32 *) pte_addr_l2;
+                       pte_size = hw_mmu_pte_size_l2(pte_val);
+                       /* va_curr aligned to pte_size? */
+                       if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+                           va_curr & (pte_size - 1)) {
+                               status = -EPERM;
+                               break;
+                       }
+
+                       /* Collect Physical addresses from VA */
+                       paddr = (pte_val & ~(pte_size - 1));
+                       if (pte_size == HW_PAGE_SIZE64KB)
+                               numof4k_pages = 16;
+                       else
+                               numof4k_pages = 1;
+                       temp = 0;
+                       while (temp++ < numof4k_pages) {
+                               if (!pfn_valid(__phys_to_pfn(paddr))) {
+                                       paddr += HW_PAGE_SIZE4KB;
+                                       continue;
+                               }
+                               pg = PHYS_TO_PAGE(paddr);
+                               if (page_count(pg) < 1) {
+                                       pr_info("DSPBRIDGE: UNMAP function: "
+                                               "COUNT 0 FOR PA 0x%x, size = "
+                                               "0x%x\n", paddr, ul_num_bytes);
+                                       bad_page_dump(paddr, pg);
+                               } else {
+                                       SetPageDirty(pg);
+                                       page_cache_release(pg);
+                               }
+                               paddr += HW_PAGE_SIZE4KB;
+                       }
+                       if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)
+                           == RET_FAIL) {
+                               status = -EPERM;
+                               goto EXIT_LOOP;
+                       }
+
+                       status = 0;
+                       rem_bytes_l2 -= pte_size;
+                       va_curr += pte_size;
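+                       /* One u32 PTE covers 4KB; a 64KB large page is
+                        * replicated across 16 consecutive PTEs in the
+                        * coarse table, so skip all of its copies */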
+                       pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+               }
+               spin_lock(&pt->pg_lock);
+               if (rem_bytes_l2 == 0) {
+                       pt->pg_info[l2_page_num].num_entries -= pte_count;
+                       if (pt->pg_info[l2_page_num].num_entries == 0) {
+                               /*
+                                * Clear the L1 PTE pointing to the L2 PT
+                                */
+                               if (hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+                                                    HW_MMU_COARSE_PAGE_SIZE) ==
+                                   RET_OK)
+                                       status = 0;
+                               else {
+                                       status = -EPERM;
+                                       spin_unlock(&pt->pg_lock);
+                                       goto EXIT_LOOP;
+                               }
+                       }
+                       rem_bytes -= pte_count * PG_SIZE4K;
+               } else
+                       status = -EPERM;
+
+               spin_unlock(&pt->pg_lock);
+               continue;
+skip_coarse_page:
+               /* va_curr aligned to pte_size? */
+               /* pte_size = 1 MB or 16 MB */
+               if (pte_size == 0 || rem_bytes < pte_size ||
+                   va_curr & (pte_size - 1)) {
+                       status = -EPERM;
+                       break;
+               }
+
+               if (pte_size == HW_PAGE_SIZE1MB)
+                       numof4k_pages = 256;
+               else
+                       numof4k_pages = 4096;
+               temp = 0;
+               /* Collect Physical addresses from VA */
+               paddr = (pte_val & ~(pte_size - 1));
+               while (temp++ < numof4k_pages) {
+                       if (pfn_valid(__phys_to_pfn(paddr))) {
+                               pg = PHYS_TO_PAGE(paddr);
+                               if (page_count(pg) < 1) {
+                                       pr_info("DSPBRIDGE: UNMAP function: "
+                                               "COUNT 0 FOR PA 0x%x, size = "
+                                               "0x%x\n", paddr, ul_num_bytes);
+                                       bad_page_dump(paddr, pg);
+                               } else {
+                                       SetPageDirty(pg);
+                                       page_cache_release(pg);
+                               }
+                       }
+                       paddr += HW_PAGE_SIZE4KB;
+               }
+               if (hw_mmu_pte_clear(l1_base_va, va_curr, pte_size) == RET_OK) {
+                       status = 0;
+                       rem_bytes -= pte_size;
+                       va_curr += pte_size;
+               } else {
+                       status = -EPERM;
+                       goto EXIT_LOOP;
+               }
+       }
+       /*
+        * It is better to flush the TLB here, so that any stale old entries
+        * get flushed
+        */
+EXIT_LOOP:
+       flush_all(dev_context);
+       dev_dbg(bridge,
+               "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+               " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+               pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+       return status;
+}
+
+/*
+ *  ======== user_va2_pa ========
+ *  Purpose:
+ *      This function walks through the page tables to convert a userland
+ *      virtual address to physical address
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+
+       pgd = pgd_offset(mm, address);
+       if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+               pmd = pmd_offset(pgd, address);
+               if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+                       ptep = pte_offset_map(pmd, address);
+                       if (ptep) {
+                               pte = *ptep;
+                               /* drop the mapping before using the copy */
+                               pte_unmap(ptep);
+                               if (pte_present(pte))
+                                       return pte_val(pte) & PAGE_MASK;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ *  ======== pte_update ========
+ *      This function calculates the optimum page-aligned addresses and sizes
+ *      Caller must pass page-aligned values
+ */
+static int pte_update(struct bridge_dev_context *hDevContext, u32 pa,
+                            u32 va, u32 size,
+                            struct hw_mmu_map_attrs_t *map_attrs)
+{
+       u32 i;
+       u32 all_bits;
+       u32 pa_curr = pa;
+       u32 va_curr = va;
+       u32 num_bytes = size;
+       struct bridge_dev_context *dev_context = hDevContext;
+       int status = 0;
+       u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+               HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+       };
+
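+       /*
+        * Illustrative example (not from the original source): mapping
+        * pa = va = 0x87F01000 with size = 1MB uses 4KB PTEs up to the
+        * first 64KB boundary, 64KB PTEs up to 0x88000000, and a final
+        * 4KB PTE for the remaining tail.
+        */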
+       while (num_bytes && DSP_SUCCEEDED(status)) {
+               /* To find the max. page size with which both PA & VA are
+                * aligned */
+               all_bits = pa_curr | va_curr;
+
+               for (i = 0; i < 4; i++) {
+                       if ((num_bytes >= page_size[i]) && ((all_bits &
+                                                            (page_size[i] -
+                                                             1)) == 0)) {
+                               status =
+                                   pte_set(dev_context->pt_attrs, pa_curr,
+                                           va_curr, page_size[i], map_attrs);
+                               pa_curr += page_size[i];
+                               va_curr += page_size[i];
+                               num_bytes -= page_size[i];
+                               /* Don't try smaller sizes. Hopefully we have
+                                * reached an address aligned to a bigger page
+                                * size */
+                               break;
+                       }
+               }
+       }
+
+       return status;
+}
+
+/*
+ *  ======== pte_set ========
+ *      This function calculates PTE address (MPU virtual) to be updated
+ *      It also manages the L2 page tables
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+                         u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+       u32 i;
+       u32 pte_val;
+       u32 pte_addr_l1;
+       u32 pte_size;
+       /* Base address of the PT that will be updated */
+       u32 pg_tbl_va;
+       u32 l1_base_va;
+       /* Compiler warns that the next three variables might be used
+        * uninitialized in this function. That does not seem to be the case,
+        * but initialize them anyway to silence the warning. */
+       u32 l2_base_va = 0;
+       u32 l2_base_pa = 0;
+       u32 l2_page_num = 0;
+       int status = 0;
+
+       l1_base_va = pt->l1_base_va;
+       pg_tbl_va = l1_base_va;
+       if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+               /* Find whether the L1 PTE points to a valid L2 PT */
+               pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+               if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+                       pte_val = *(u32 *) pte_addr_l1;
+                       pte_size = hw_mmu_pte_size_l1(pte_val);
+               } else {
+                       return -EPERM;
+               }
+               spin_lock(&pt->pg_lock);
+               if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+                       /* Get the L2 PA from the L1 PTE, and find
+                        * corresponding L2 VA */
+                       l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+                       l2_base_va =
+                           l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+                       l2_page_num =
+                           (l2_base_pa -
+                            pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+               } else if (pte_size == 0) {
+                       /* L1 PTE is invalid. Allocate a L2 PT and
+                        * point the L1 PTE to it */
+                       /* Find a free L2 PT. */
+                       for (i = 0; (i < pt->l2_num_pages) &&
+                            (pt->pg_info[i].num_entries != 0); i++)
+                            ;
+                       if (i < pt->l2_num_pages) {
+                               l2_page_num = i;
+                               l2_base_pa = pt->l2_base_pa + (l2_page_num *
+                                               HW_MMU_COARSE_PAGE_SIZE);
+                               l2_base_va = pt->l2_base_va + (l2_page_num *
+                                               HW_MMU_COARSE_PAGE_SIZE);
+                               /* Endianness attributes are ignored for
+                                * HW_MMU_COARSE_PAGE_SIZE */
+                               status =
+                                   hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+                                                  HW_MMU_COARSE_PAGE_SIZE,
+                                                  attrs);
+                       } else {
+                               status = -ENOMEM;
+                       }
+               } else {
+                       /* Found valid L1 PTE of another size.
+                        * Should not overwrite it. */
+                       status = -EPERM;
+               }
+               if (DSP_SUCCEEDED(status)) {
+                       pg_tbl_va = l2_base_va;
+                       if (size == HW_PAGE_SIZE64KB)
+                               pt->pg_info[l2_page_num].num_entries += 16;
+                       else
+                               pt->pg_info[l2_page_num].num_entries++;
+                       dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+                               "%x, num_entries %x\n", l2_base_va,
+                               l2_base_pa, l2_page_num,
+                               pt->pg_info[l2_page_num].num_entries);
+               }
+               spin_unlock(&pt->pg_lock);
+       }
+       if (DSP_SUCCEEDED(status)) {
+               dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+                       pg_tbl_va, pa, va, size);
+               dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+                       "mixed_size %x\n", attrs->endianism,
+                       attrs->element_size, attrs->mixed_size);
+               status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+       }
+
+       return status;
+}
+
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+                                 u32 ul_mpu_addr, u32 ulVirtAddr,
+                                 u32 ul_num_bytes,
+                                 struct hw_mmu_map_attrs_t *hw_attrs)
+{
+       int status = 0;
+       struct page *page[1];
+       u32 i;
+       u32 pa_curr;
+       u32 pa_next;
+       u32 va_curr;
+       u32 size_curr;
+       u32 num_pages;
+       u32 pa;
+       u32 num_of4k_pages;
+       u32 temp = 0;
+
+       /*
+        * Do Kernel va to pa translation.
+        * Combine physically contiguous regions to reduce TLBs.
+        * Pass the translated pa to pte_update.
+        */
+       num_pages = ul_num_bytes / PAGE_SIZE;   /* PAGE_SIZE = OS page size */
+       i = 0;
+       va_curr = ul_mpu_addr;
+       page[0] = vmalloc_to_page((void *)va_curr);
+       pa_next = page_to_phys(page[0]);
+       while (DSP_SUCCEEDED(status) && (i < num_pages)) {
+               /*
+                * Reuse pa_next from the previous iteration to avoid
+                * an extra va2pa call
+                */
+               pa_curr = pa_next;
+               size_curr = PAGE_SIZE;
+               /*
+                * If the next page is physically contiguous,
+                * map it with the current one by increasing
+                * the size of the region to be mapped
+                */
+               while (++i < num_pages) {
+                       page[0] =
+                           vmalloc_to_page((void *)(va_curr + size_curr));
+                       pa_next = page_to_phys(page[0]);
+
+                       if (pa_next == (pa_curr + size_curr))
+                               size_curr += PAGE_SIZE;
+                       else
+                               break;
+
+               }
+               if (pa_next == 0) {
+                       status = -ENOMEM;
+                       break;
+               }
+               pa = pa_curr;
+               num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+               for (temp = 0; temp < num_of4k_pages; temp++) {
+                       get_page(PHYS_TO_PAGE(pa));
+                       pa += HW_PAGE_SIZE4KB;
+               }
+               status = pte_update(dev_context, pa_curr, ulVirtAddr +
+                                   (va_curr - ul_mpu_addr), size_curr,
+                                   hw_attrs);
+               va_curr += size_curr;
+       }
+       if (DSP_SUCCEEDED(status))
+               status = 0;
+       else
+               status = -EPERM;
+
+       /*
+        * In any case, flush the TLB
+        * This is called from here instead from pte_update to avoid unnecessary
+        * repetition while mapping non-contiguous physical regions of a virtual
+        * region
+        */
+       flush_all(dev_context);
+       dev_dbg(bridge, "%s status %x\n", __func__, status);
+       return status;
+}
+
+/*
+ *  ======== wait_for_start ========
+ *      Wait for the signal from DSP that it has started, or time out.
+ */
+bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
+{
+       u16 timeout = TIHELEN_ACKTIMEOUT;
+
+       /*  Wait for response from board */
+       while (*((volatile u16 *)dw_sync_addr) && --timeout)
+               udelay(10);
+
+       /*  If timed out: return false */
+       if (!timeout) {
+               pr_err("%s: Timed out waiting for DSP to start\n", __func__);
+               return false;
+       }
+       return true;
+}
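
The chunking loop in pte_update above always restarts from the largest MMU
page size (16 MB, 1 MB, 64 KB, 4 KB) and takes the first one that both fits
in the remaining byte count and matches the alignment of the current PA and
VA. A minimal user-space sketch of that selection logic, with the
HW_PAGE_SIZE* constants written out as plain values (hypothetical harness,
not part of the patch):

#include <stdio.h>

/* Stand-ins for HW_PAGE_SIZE16MB .. HW_PAGE_SIZE4KB */
static const unsigned int page_size[] = {
	0x1000000, 0x100000, 0x10000, 0x1000
};

static void show_chunks(unsigned int pa, unsigned int va, unsigned int bytes)
{
	while (bytes) {
		unsigned int all_bits = pa | va;
		int i;

		for (i = 0; i < 4; i++) {
			if (bytes >= page_size[i] &&
			    (all_bits & (page_size[i] - 1)) == 0) {
				printf("map 0x%x -> 0x%x, size 0x%x\n",
				       pa, va, page_size[i]);
				pa += page_size[i];
				va += page_size[i];
				bytes -= page_size[i];
				break;	/* retry from the largest size */
			}
		}
	}
}

int main(void)
{
	/* 64 KB-aligned, 0x11000 bytes: one 64 KB entry plus one 4 KB entry */
	show_chunks(0x80010000, 0x20010000, 0x11000);
	return 0;
}

Note that, exactly like pte_update, the sketch loops forever if the input is
not at least 4 KB-aligned; the driver relies on callers honoring the
page-aligned-values contract stated in the pte_update header.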
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
new file mode 100644 (file)
index 0000000..00ebc0b
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * tiomap_pwr.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of DSP wake/sleep routines.
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/cfg.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/io_sm.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/brddefs.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/iodefs.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+#include <dspbridge/pwr_sh.h>
+
+/*  ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/wdt.h>
+
+/*  ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include <mach-omap2/prm-regbits-34xx.h>
+#include <mach-omap2/cm-regbits-34xx.h>
+
+#define PWRSTST_TIMEOUT          200
+
+/*
+ *  ======== handle_constraints_set ========
+ *     Sets new DSP constraint
+ */
+int handle_constraints_set(struct bridge_dev_context *dev_context,
+                                 IN void *pargs)
+{
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 *constraint_val;
+       struct dspbridge_platform_data *pdata =
+           omap_dspbridge_dev->dev.platform_data;
+
+       constraint_val = (u32 *) (pargs);
+       /* Read the target value requested by DSP */
+       dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
+               (u32) *(constraint_val + 1));
+
+       /* Set the new opp value */
+       if (pdata->dsp_set_min_opp)
+               (*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
+#endif /* #ifdef CONFIG_BRIDGE_DVFS */
+       return 0;
+}
+
+/*
+ *  ======== handle_hibernation_from_dsp ========
+ *     Handle Hibernation requested from DSP
+ */
+int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
+{
+       int status = 0;
+#ifdef CONFIG_PM
+       u16 timeout = PWRSTST_TIMEOUT / 10;
+       u32 pwr_state;
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 opplevel;
+       struct io_mgr *hio_mgr;
+#endif
+       struct dspbridge_platform_data *pdata =
+           omap_dspbridge_dev->dev.platform_data;
+
+       pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+                                               OMAP_POWERSTATEST_MASK;
+       /* Wait for DSP to move into OFF state */
+       while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
+               if (msleep_interruptible(10)) {
+                       pr_err("Waiting for DSP OFF mode was interrupted\n");
+                       return -EPERM;
+               }
+               pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+                                       OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+       }
+       if (timeout == 0) {
+               pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
+               status = -ETIMEDOUT;
+               return status;
+       } else {
+
+               /* Save mailbox settings */
+               omap_mbox_save_ctx(dev_context->mbox);
+
+               /* Turn off DSP Peripheral clocks and DSP Load monitor timer */
+               status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+
+               /* Disable wdt on hibernation. */
+               dsp_wdt_enable(false);
+
+               if (DSP_SUCCEEDED(status)) {
+                       /* Update the Bridge driver state */
+                       dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
+#ifdef CONFIG_BRIDGE_DVFS
+                       status =
+                           dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+                       if (!hio_mgr) {
+                               status = -EFAULT;
+                               return status;
+                       }
+                       io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);
+
+                       /*
+                        * Set the OPP to low level before moving to OFF
+                        * mode
+                        */
+                       if (pdata->dsp_set_min_opp)
+                               (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+                       status = 0;
+#endif /* CONFIG_BRIDGE_DVFS */
+               }
+       }
+#endif
+       return status;
+}
+
+/*
+ *  ======== sleep_dsp ========
+ *     Put DSP in low power consuming state.
+ */
+int sleep_dsp(struct bridge_dev_context *dev_context, IN u32 dw_cmd,
+                    IN void *pargs)
+{
+       int status = 0;
+#ifdef CONFIG_PM
+#ifdef CONFIG_BRIDGE_NTFY_PWRERR
+       struct deh_mgr *hdeh_mgr;
+#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
+       u16 timeout = PWRSTST_TIMEOUT / 10;
+       u32 pwr_state, target_pwr_state;
+       struct dspbridge_platform_data *pdata =
+                               omap_dspbridge_dev->dev.platform_data;
+
+       /* Check if sleep code is valid */
+       if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
+               return -EINVAL;
+
+       switch (dev_context->dw_brd_state) {
+       case BRD_RUNNING:
+               omap_mbox_save_ctx(dev_context->mbox);
+               if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+                       sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+                       dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
+                               __func__);
+                       target_pwr_state = PWRDM_POWER_OFF;
+               } else {
+                       sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
+                       target_pwr_state = PWRDM_POWER_RET;
+               }
+               break;
+       case BRD_RETENTION:
+               omap_mbox_save_ctx(dev_context->mbox);
+               if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
+                       sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
+                       target_pwr_state = PWRDM_POWER_OFF;
+               } else
+                       return 0;
+               break;
+       case BRD_HIBERNATION:
+       case BRD_DSP_HIBERNATION:
+               /* Already in Hibernation, so just return */
+               dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
+                       __func__);
+               return 0;
+       case BRD_STOPPED:
+               dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
+               return 0;
+       default:
+               dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
+               return -EPERM;
+       }
+
+       /* Get the PRCM DSP power domain status */
+       pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+                                               OMAP_POWERSTATEST_MASK;
+
+       /* Wait for DSP to move into target power state */
+       while ((pwr_state != target_pwr_state) && --timeout) {
+               if (msleep_interruptible(10)) {
+                       pr_err("Waiting for DSP to suspend was interrupted\n");
+                       return -EPERM;
+               }
+               pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+                                       OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+       }
+
+       if (!timeout) {
+               pr_err("%s: Timed out waiting for DSP to sleep, state %x\n",
+                      __func__, pwr_state);
+#ifdef CONFIG_BRIDGE_NTFY_PWRERR
+               dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
+               bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
+#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
+               return -ETIMEDOUT;
+       } else {
+               /* Update the Bridge driver state */
+               if (dsp_test_sleepstate == PWRDM_POWER_OFF)
+                       dev_context->dw_brd_state = BRD_HIBERNATION;
+               else
+                       dev_context->dw_brd_state = BRD_RETENTION;
+
+               /* Disable wdt on hibernation. */
+               dsp_wdt_enable(false);
+
+               /* Turn off DSP Peripheral clocks */
+               status = dsp_clock_disable_all(dev_context->dsp_per_clks);
+               if (DSP_FAILED(status))
+                       return status;
+#ifdef CONFIG_BRIDGE_DVFS
+               else if (target_pwr_state == PWRDM_POWER_OFF) {
+                       /*
+                        * Set the OPP to low level before moving to OFF mode
+                        */
+                       if (pdata->dsp_set_min_opp)
+                               (*pdata->dsp_set_min_opp) (VDD1_OPP1);
+               }
+#endif /* CONFIG_BRIDGE_DVFS */
+       }
+#endif /* CONFIG_PM */
+       return status;
+}
+
+/*
+ *  ======== wake_dsp ========
+ *     Wake up DSP from sleep.
+ */
+int wake_dsp(struct bridge_dev_context *dev_context, IN void *pargs)
+{
+       int status = 0;
+#ifdef CONFIG_PM
+
+       /* Check the board state, if it is not 'SLEEP' then return */
+       if (dev_context->dw_brd_state == BRD_RUNNING ||
+           dev_context->dw_brd_state == BRD_STOPPED) {
+               /* The board is already running or stopped; there is
+                * nothing to wake up, so just return */
+               return 0;
+       }
+
+       /* Send a wakeup message to DSP */
+       sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
+
+       /* Set the device state to RUNNING */
+       dev_context->dw_brd_state = BRD_RUNNING;
+#endif /* CONFIG_PM */
+       return status;
+}
+
+/*
+ *  ======== dsp_peripheral_clk_ctrl ========
+ *     Enable/Disable the DSP peripheral clocks as needed.
+ */
+int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
+                                  IN void *pargs)
+{
+       u32 ext_clk = 0;
+       u32 ext_clk_id = 0;
+       u32 ext_clk_cmd = 0;
+       u32 clk_id_index = MBX_PM_MAX_RESOURCES;
+       u32 tmp_index;
+       u32 dsp_per_clks_before;
+       int status = 0;
+
+       dsp_per_clks_before = dev_context->dsp_per_clks;
+
+       ext_clk = (u32) *((u32 *) pargs);
+       ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;
+
+       /* process the power message -- TODO, keep it in a separate function */
+       for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
+               if (ext_clk_id == bpwr_clkid[tmp_index]) {
+                       clk_id_index = tmp_index;
+                       break;
+               }
+       }
+       /* TODO -- An assert may be too hard a restriction here. Maybe we
+        * should just return with failure when the CLK ID does not match */
+       /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
+       if (clk_id_index == MBX_PM_MAX_RESOURCES) {
+               /* return with a more meaningful error code */
+               return -EPERM;
+       }
+       ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
+       switch (ext_clk_cmd) {
+       case BPWR_DISABLE_CLOCK:
+               status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
+               dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
+                                         false);
+               if (DSP_SUCCEEDED(status)) {
+                       (dev_context->dsp_per_clks) &=
+                               (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
+               }
+               break;
+       case BPWR_ENABLE_CLOCK:
+               status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
+               dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
+               if (DSP_SUCCEEDED(status))
+                       (dev_context->dsp_per_clks) |=
+                               (1 << bpwr_clks[clk_id_index].clk);
+               break;
+       default:
+               dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
+               /* unsupported cmd */
+               /* TODO -- provide support for AUTOIDLE Enable/Disable
+                * commands */
+       }
+       return status;
+}
+
+/*
+ *  ======== pre_scale_dsp ========
+ *  Sends pre-scale notification to DSP
+ *
+ */
+int pre_scale_dsp(struct bridge_dev_context *dev_context, IN void *pargs)
+{
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 level;
+       u32 voltage_domain;
+
+       voltage_domain = *((u32 *) pargs);
+       level = *((u32 *) pargs + 1);
+
+       dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+               __func__, voltage_domain, level);
+       if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+           (dev_context->dw_brd_state == BRD_RETENTION) ||
+           (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+               dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n", __func__);
+               return 0;
+       } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+               /* Send a pre-scale notification to DSP */
+               dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
+               sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
+               return 0;
+       } else {
+               return -EPERM;
+       }
+#endif /* #ifdef CONFIG_BRIDGE_DVFS */
+       return 0;
+}
+
+/*
+ *  ======== post_scale_dsp ========
+ *  Sends post-scale notification to DSP
+ *
+ */
+int post_scale_dsp(struct bridge_dev_context *dev_context,
+                                                       IN void *pargs)
+{
+       int status = 0;
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 level;
+       u32 voltage_domain;
+       struct io_mgr *hio_mgr;
+
+       status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+       if (!hio_mgr)
+               return -EFAULT;
+
+       voltage_domain = *((u32 *) pargs);
+       level = *((u32 *) pargs + 1);
+       dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
+               __func__, voltage_domain, level);
+       if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
+           (dev_context->dw_brd_state == BRD_RETENTION) ||
+           (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+               /* Update the OPP value in shared memory */
+               io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+               dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
+                       __func__);
+       } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+               /* Update the OPP value in shared memory */
+               io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
+               /* Send a post notification to DSP */
+               sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
+               dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
+                       "to DSP\n", __func__);
+       } else {
+               status = -EPERM;
+       }
+#endif /* #ifdef CONFIG_BRIDGE_DVFS */
+       return status;
+}
+
+void dsp_clk_wakeup_event_ctrl(u32 clk_id, bool enable)
+{
+       struct cfg_hostres *resources;
+       int status = 0;
+       u32 iva2_grpsel;
+       u32 mpu_grpsel;
+       struct dev_object *hdev_object = NULL;
+       struct bridge_dev_context *bridge_context = NULL;
+
+       hdev_object = (struct dev_object *)drv_get_first_dev_object();
+       if (!hdev_object)
+               return;
+
+       status = dev_get_bridge_context(hdev_object, &bridge_context);
+       if (!bridge_context)
+               return;
+
+       resources = bridge_context->resources;
+       if (!resources)
+               return;
+
+       switch (clk_id) {
+       case BPWR_GP_TIMER5:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_GP_TIMER6:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_GP_TIMER7:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_GP_TIMER8:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_MCBSP1:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_core_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_core_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_core_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_core_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_MCBSP2:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_MCBSP3:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_MCBSP4:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_per_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_per_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       case BPWR_MCBSP5:
+               iva2_grpsel = (u32) *((reg_uword32 *)
+                                      ((u32) (resources->dw_core_pm_base) +
+                                       0xA8));
+               mpu_grpsel = (u32) *((reg_uword32 *)
+                                     ((u32) (resources->dw_core_pm_base) +
+                                      0xA4));
+               if (enable) {
+                       iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+                       mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+               } else {
+                       mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
+                       iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
+               }
+               *((reg_uword32 *) ((u32) (resources->dw_core_pm_base) + 0xA8))
+                   = iva2_grpsel;
+               *((reg_uword32 *) ((u32) (resources->dw_core_pm_base) + 0xA4))
+                   = mpu_grpsel;
+               break;
+       }
+}
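
Every case in dsp_clk_wakeup_event_ctrl above performs the same
read-modify-write on the IVA2 (offset 0xA8) and MPU (offset 0xA4) GRPSEL
registers, differing only in the peripheral's OMAP3430_GRPSEL_*_MASK and in
whether dw_per_pm_base or dw_core_pm_base is used (MCBSP1 and MCBSP5 live on
the CORE base). A sketch of how the switch could collapse into one helper;
the helper and table are hypothetical, and reg_uword32 is assumed to come
from hw_defs.h as in the rest of the driver:

/* Hypothetical per-clock routing table */
struct grpsel_route {
	u32 clk_id;		/* BPWR_GP_TIMER5, BPWR_MCBSP1, ... */
	u32 mask;		/* matching OMAP3430_GRPSEL_*_MASK */
	bool use_core_base;	/* true for MCBSP1/MCBSP5 */
};

/* Route a peripheral's wakeup event to IVA2 (enable) or back to the MPU */
static void route_wakeup_event(u32 pm_base, u32 mask, bool enable)
{
	u32 iva2_grpsel = (u32) *((reg_uword32 *) (pm_base + 0xA8));
	u32 mpu_grpsel = (u32) *((reg_uword32 *) (pm_base + 0xA4));

	if (enable) {
		iva2_grpsel |= mask;
		mpu_grpsel &= ~mask;
	} else {
		mpu_grpsel |= mask;
		iva2_grpsel &= ~mask;
	}
	*((reg_uword32 *) (pm_base + 0xA8)) = iva2_grpsel;
	*((reg_uword32 *) (pm_base + 0xA4)) = mpu_grpsel;
}

With a table of eight grpsel_route entries, each switch case reduces to a
lookup followed by a single route_wakeup_event() call, and the register
offsets live in exactly one place.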
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
new file mode 100644 (file)
index 0000000..3b2ea70
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * tiomap_io.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation for the io read/write routines.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/drv.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/wdt.h>
+
+/*  ----------------------------------- specific to this file */
+#include "_tiomap.h"
+#include "_tiomap_pwr.h"
+#include "tiomap_io.h"
+
+static u32 ul_ext_base;
+static u32 ul_ext_end;
+
+static u32 shm0_end;
+static u32 ul_dyn_ext_base;
+static u32 ul_trace_sec_beg;
+static u32 ul_trace_sec_end;
+static u32 ul_shm_base_virt;
+
+bool symbols_reloaded = true;
+
+/*
+ *  ======== read_ext_dsp_data ========
+ *      Copies DSP external memory buffers to the host side buffers.
+ */
+int read_ext_dsp_data(struct bridge_dev_context *hDevContext,
+                            OUT u8 *pbHostBuf, u32 dwDSPAddr,
+                            u32 ul_num_bytes, u32 ulMemType)
+{
+       int status = 0;
+       struct bridge_dev_context *dev_context = hDevContext;
+       u32 offset;
+       u32 ul_tlb_base_virt = 0;
+       u32 ul_shm_offset_virt = 0;
+       u32 dw_ext_prog_virt_mem;
+       u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+       bool trace_read = false;
+
+       if (!ul_shm_base_virt) {
+               status = dev_get_symbol(dev_context->hdev_obj,
+                                       SHMBASENAME, &ul_shm_base_virt);
+       }
+       DBC_ASSERT(ul_shm_base_virt != 0);
+
+       /* Check if it is a read of Trace section */
+       if (DSP_SUCCEEDED(status) && !ul_trace_sec_beg) {
+               status = dev_get_symbol(dev_context->hdev_obj,
+                                       DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+       }
+       DBC_ASSERT(ul_trace_sec_beg != 0);
+
+       if (DSP_SUCCEEDED(status) && !ul_trace_sec_end) {
+               status = dev_get_symbol(dev_context->hdev_obj,
+                                       DSP_TRACESEC_END, &ul_trace_sec_end);
+       }
+       DBC_ASSERT(ul_trace_sec_end != 0);
+
+       if (DSP_SUCCEEDED(status)) {
+               if ((dwDSPAddr <= ul_trace_sec_end) &&
+                   (dwDSPAddr >= ul_trace_sec_beg))
+                       trace_read = true;
+       }
+
+       /* If reading from TRACE, force remap/unmap */
+       if (trace_read && dw_base_addr) {
+               dw_base_addr = 0;
+               dev_context->dw_dsp_ext_base_addr = 0;
+       }
+
+       if (!dw_base_addr) {
+               /* Initialize ul_ext_base and ul_ext_end */
+               ul_ext_base = 0;
+               ul_ext_end = 0;
+
+               /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
+               if (DSP_SUCCEEDED(status) && !ul_dyn_ext_base) {
+                       status = dev_get_symbol(dev_context->hdev_obj,
+                                               DYNEXTBASE, &ul_dyn_ext_base);
+               }
+               DBC_ASSERT(ul_dyn_ext_base != 0);
+
+               if (DSP_SUCCEEDED(status)) {
+                       status = dev_get_symbol(dev_context->hdev_obj,
+                                               EXTBASE, &ul_ext_base);
+               }
+               DBC_ASSERT(ul_ext_base != 0);
+
+               if (DSP_SUCCEEDED(status)) {
+                       status = dev_get_symbol(dev_context->hdev_obj,
+                                               EXTEND, &ul_ext_end);
+               }
+               DBC_ASSERT(ul_ext_end != 0);
+
+               /* Trace buffer is right after the shm SEG0,
+                *  so set the base address to SHMBASE */
+               if (trace_read) {
+                       ul_ext_base = ul_shm_base_virt;
+                       ul_ext_end = ul_trace_sec_end;
+               }
+
+               DBC_ASSERT(ul_ext_end != 0);
+               DBC_ASSERT(ul_ext_end > ul_ext_base);
+
+               if (ul_ext_end < ul_ext_base)
+                       status = -EPERM;
+
+               if (DSP_SUCCEEDED(status)) {
+                       ul_tlb_base_virt =
+                           dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
+                       DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+                       dw_ext_prog_virt_mem =
+                           dev_context->atlb_entry[0].ul_gpp_va;
+
+                       if (!trace_read) {
+                               ul_shm_offset_virt =
+                                   ul_shm_base_virt - ul_tlb_base_virt;
+                               ul_shm_offset_virt +=
+                                   PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
+                                                 1, HW_PAGE_SIZE64KB);
+                               dw_ext_prog_virt_mem -= ul_shm_offset_virt;
+                               dw_ext_prog_virt_mem +=
+                                   (ul_ext_base - ul_dyn_ext_base);
+                               dev_context->dw_dsp_ext_base_addr =
+                                   dw_ext_prog_virt_mem;
+
+                               /*
+                                * This dw_dsp_ext_base_addr will get cleared
+                                * only when the board is stopped.
+                                */
+                               if (!dev_context->dw_dsp_ext_base_addr)
+                                       status = -EPERM;
+                       }
+
+                       dw_base_addr = dw_ext_prog_virt_mem;
+               }
+       }
+
+       if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+               status = -EPERM;
+
+       offset = dwDSPAddr - ul_ext_base;
+
+       if (DSP_SUCCEEDED(status))
+               memcpy(pbHostBuf, (u8 *) dw_base_addr + offset, ul_num_bytes);
+
+       return status;
+}
+
+/*
+ *  ======== write_dsp_data ========
+ *  Purpose:
+ *      Copies buffers to the DSP internal/external memory.
+ */
+int write_dsp_data(struct bridge_dev_context *hDevContext,
+                         IN u8 *pbHostBuf, u32 dwDSPAddr, u32 ul_num_bytes,
+                         u32 ulMemType)
+{
+       u32 offset;
+       u32 dw_base_addr = hDevContext->dw_dsp_base_addr;
+       struct cfg_hostres *resources = hDevContext->resources;
+       int status = 0;
+       u32 base1, base2, base3;
+       base1 = OMAP_DSP_MEM1_SIZE;
+       base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
+       base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;
+
+       if (!resources)
+               return -EPERM;
+
+       offset = dwDSPAddr - hDevContext->dw_dsp_start_add;
+       if (offset < base1) {
+               dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
+                                                 resources->dw_mem_length[2]);
+       } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
+               dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
+                                                 resources->dw_mem_length[3]);
+               offset = offset - base2;
+       } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
+                  offset < base3 + OMAP_DSP_MEM3_SIZE) {
+               dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
+                                                 resources->dw_mem_length[4]);
+               offset = offset - base3;
+       } else {
+               return -EPERM;
+       }
+       if (ul_num_bytes)
+               memcpy((u8 *) (dw_base_addr + offset), pbHostBuf, ul_num_bytes);
+       else
+               *((u32 *) pbHostBuf) = dw_base_addr + offset;
+
+       return status;
+}
+
+/*
+ *  ======== write_ext_dsp_data ========
+ *  Purpose:
+ *      Copies buffers to the external memory.
+ *
+ */
+int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+                             IN u8 *pbHostBuf, u32 dwDSPAddr,
+                             u32 ul_num_bytes, u32 ulMemType,
+                             bool bDynamicLoad)
+{
+       u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+       u32 dw_offset = 0;
+       u8 temp_byte1, temp_byte2;
+       u8 remain_byte[4];
+       s32 i;
+       int ret = 0;
+       u32 dw_ext_prog_virt_mem;
+       u32 ul_tlb_base_virt = 0;
+       u32 ul_shm_offset_virt = 0;
+       struct cfg_hostres *host_res = dev_context->resources;
+       bool trace_load = false;
+       temp_byte1 = 0x0;
+       temp_byte2 = 0x0;
+
+       if (symbols_reloaded) {
+               /* Check if it is a load to Trace section */
+               ret = dev_get_symbol(dev_context->hdev_obj,
+                                    DSP_TRACESEC_BEG, &ul_trace_sec_beg);
+               if (DSP_SUCCEEDED(ret))
+                       ret = dev_get_symbol(dev_context->hdev_obj,
+                                            DSP_TRACESEC_END,
+                                            &ul_trace_sec_end);
+       }
+       if (DSP_SUCCEEDED(ret)) {
+               if ((dwDSPAddr <= ul_trace_sec_end) &&
+                   (dwDSPAddr >= ul_trace_sec_beg))
+                       trace_load = true;
+       }
+
+       /* If dynamic, force remap/unmap */
+       if ((bDynamicLoad || trace_load) && dw_base_addr) {
+               dw_base_addr = 0;
+               MEM_UNMAP_LINEAR_ADDRESS((void *)
+                                        dev_context->dw_dsp_ext_base_addr);
+               dev_context->dw_dsp_ext_base_addr = 0x0;
+       }
+       if (!dw_base_addr) {
+               if (symbols_reloaded)
+                       /* Get SHM_BEG  EXT_BEG and EXT_END. */
+                       ret = dev_get_symbol(dev_context->hdev_obj,
+                                            SHMBASENAME, &ul_shm_base_virt);
+               DBC_ASSERT(ul_shm_base_virt != 0);
+               if (bDynamicLoad) {
+                       if (DSP_SUCCEEDED(ret)) {
+                               if (symbols_reloaded)
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj, DYNEXTBASE,
+                                            &ul_ext_base);
+                       }
+                       DBC_ASSERT(ul_ext_base != 0);
+                       if (DSP_SUCCEEDED(ret)) {
+                               /* DR  OMAPS00013235 : DLModules array may be
+                                * in EXTMEM. It is expected that DYNEXTMEM and
+                                * EXTMEM are contiguous, so checking for the
+                                * upper bound at EXTEND should be Ok. */
+                               if (symbols_reloaded)
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj, EXTEND,
+                                            &ul_ext_end);
+                       }
+               } else {
+                       if (symbols_reloaded) {
+                               if (DSP_SUCCEEDED(ret))
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj, EXTBASE,
+                                            &ul_ext_base);
+                               DBC_ASSERT(ul_ext_base != 0);
+                               if (DSP_SUCCEEDED(ret))
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj, EXTEND,
+                                            &ul_ext_end);
+                       }
+               }
+               /* Trace buffer is right after the shm SEG0, so set the
+                *      base address to SHMBASE */
+               if (trace_load)
+                       ul_ext_base = ul_shm_base_virt;
+
+               DBC_ASSERT(ul_ext_end != 0);
+               DBC_ASSERT(ul_ext_end > ul_ext_base);
+               if (ul_ext_end < ul_ext_base)
+                       ret = -EPERM;
+
+               if (DSP_SUCCEEDED(ret)) {
+                       ul_tlb_base_virt =
+                           dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
+                       DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
+
+                       if (symbols_reloaded) {
+                               if (DSP_SUCCEEDED(ret)) {
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj,
+                                            DSP_TRACESEC_END, &shm0_end);
+                               }
+                               if (DSP_SUCCEEDED(ret)) {
+                                       ret =
+                                           dev_get_symbol
+                                           (dev_context->hdev_obj, DYNEXTBASE,
+                                            &ul_dyn_ext_base);
+                               }
+                       }
+                       ul_shm_offset_virt =
+                           ul_shm_base_virt - ul_tlb_base_virt;
+                       if (trace_load) {
+                               dw_ext_prog_virt_mem =
+                                   dev_context->atlb_entry[0].ul_gpp_va;
+                       } else {
+                               dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
+                               dw_ext_prog_virt_mem +=
+                                   (ul_ext_base - ul_dyn_ext_base);
+                       }
+
+                       dev_context->dw_dsp_ext_base_addr =
+                           (u32) MEM_LINEAR_ADDRESS((void *)
+                                                    dw_ext_prog_virt_mem,
+                                                    ul_ext_end - ul_ext_base);
+                       dw_base_addr += dev_context->dw_dsp_ext_base_addr;
+                       /* This dw_dsp_ext_base_addr will get cleared only when
+                        * the board is stopped. */
+                       if (!dev_context->dw_dsp_ext_base_addr)
+                               ret = -EPERM;
+               }
+       }
+       if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
+               ret = -EPERM;
+
+       if (DSP_SUCCEEDED(ret)) {
+               for (i = 0; i < 4; i++)
+                       remain_byte[i] = 0x0;
+
+               dw_offset = dwDSPAddr - ul_ext_base;
+               /* Also make sure the dwDSPAddr is < ul_ext_end */
+               if (dwDSPAddr > ul_ext_end || dw_offset > dwDSPAddr)
+                       ret = -EPERM;
+       }
+       if (DSP_SUCCEEDED(ret)) {
+               if (ul_num_bytes)
+                       memcpy((u8 *) dw_base_addr + dw_offset, pbHostBuf,
+                              ul_num_bytes);
+               else
+                       *((u32 *) pbHostBuf) = dw_base_addr + dw_offset;
+       }
+       /* Unmap here to force remap for other Ext loads */
+       if ((bDynamicLoad || trace_load) && dev_context->dw_dsp_ext_base_addr) {
+               MEM_UNMAP_LINEAR_ADDRESS((void *)
+                                        dev_context->dw_dsp_ext_base_addr);
+               dev_context->dw_dsp_ext_base_addr = 0x0;
+       }
+       symbols_reloaded = false;
+       return ret;
+}
+
+int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
+{
+#ifdef CONFIG_BRIDGE_DVFS
+       u32 opplevel = 0;
+#endif
+       struct dspbridge_platform_data *pdata =
+               omap_dspbridge_dev->dev.platform_data;
+       struct cfg_hostres *resources = dev_context->resources;
+       int status = 0;
+       u32 temp;
+
+       if (!dev_context->mbox)
+               return 0;
+
+       if (!resources)
+               return -EPERM;
+
+       if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+           dev_context->dw_brd_state == BRD_HIBERNATION) {
+#ifdef CONFIG_BRIDGE_DVFS
+               if (pdata->dsp_get_opp)
+                       opplevel = (*pdata->dsp_get_opp) ();
+               if (opplevel == VDD1_OPP1) {
+                       if (pdata->dsp_set_min_opp)
+                               (*pdata->dsp_set_min_opp) (VDD1_OPP2);
+               }
+#endif
+               /* Restart the peripheral clocks */
+               dsp_clock_enable_all(dev_context->dsp_per_clks);
+               dsp_wdt_enable(true);
+
+               /*
+                * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
+                *     in CM_AUTOIDLE_PLL_IVA2 register
+                */
+               (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+                               OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+
+               /*
+                * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
+                *     0.75 MHz - 1.0 MHz
+                * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
+                */
+               (*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
+                               OMAP3430_EN_IVA2_DPLL_MASK,
+                               0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
+                               0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
+                               OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
+
+               /* Restore mailbox settings */
+               omap_mbox_restore_ctx(dev_context->mbox);
+
+               /* Access MMU SYS CONFIG register to generate a short wakeup */
+               temp = *(reg_uword32 *) (resources->dw_dmmu_base + 0x10);
+
+               dev_context->dw_brd_state = BRD_RUNNING;
+       } else if (dev_context->dw_brd_state == BRD_RETENTION) {
+               /* Restart the peripheral clocks */
+               dsp_clock_enable_all(dev_context->dsp_per_clks);
+       }
+
+       status = omap_mbox_msg_send(dev_context->mbox, mb_val);
+
+       if (status) {
+               pr_err("omap_mbox_msg_send failed, status = %d\n", status);
+               status = -EPERM;
+       }
+
+       return status;
+}
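
write_dsp_data above resolves a DSP-side address into one of three
host-mapped windows (dw_mem_base[2], [3] and [4]) by comparing the offset
from dw_dsp_start_add against the window boundaries, then rebasing the
offset into whichever window matched. A user-space sketch of that lookup
with made-up window geometry (the MEM* constants below are placeholders for
the OMAP_DSP_MEM*_BASE/SIZE values, and the bounds are written as
closed-open ranges):

#include <stdio.h>

#define MEM1_SIZE 0x10000u
#define MEM2_OFF  0x40000u	/* stands in for MEM2_BASE - MEM1_BASE */
#define MEM2_SIZE 0x10000u
#define MEM3_OFF  0x80000u	/* stands in for MEM3_BASE - MEM1_BASE */
#define MEM3_SIZE 0x10000u

/* Returns the dw_mem_base[] index (2, 3 or 4), or -1 if out of range */
static int pick_window(unsigned int offset, unsigned int *win_off)
{
	if (offset < MEM1_SIZE) {
		*win_off = offset;
		return 2;
	} else if (offset >= MEM2_OFF && offset < MEM2_OFF + MEM2_SIZE) {
		*win_off = offset - MEM2_OFF;
		return 3;
	} else if (offset >= MEM3_OFF && offset < MEM3_OFF + MEM3_SIZE) {
		*win_off = offset - MEM3_OFF;
		return 4;
	}
	return -1;
}

int main(void)
{
	unsigned int off;
	int win = pick_window(0x40010, &off);

	printf("window %d, offset 0x%x\n", win, off);	/* window 3, 0x10 */
	return 0;
}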
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.h b/drivers/staging/tidspbridge/core/tiomap_io.h
new file mode 100644 (file)
index 0000000..a176e5c
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * tiomap_io.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Definitions, types and function prototypes for the io (r/w external mem).
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TIOMAP_IO_
+#define _TIOMAP_IO_
+
+/*
+ * Symbol that defines beginning of shared memory.
+ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM.
+ * This will be used to program DSP MMU to map DSP Virt to GPP phys.
+ * (see dspMmuTlbEntry()).
+ */
+#define SHMBASENAME "SHM_BEG"
+#define EXTBASE     "EXT_BEG"
+#define EXTEND      "_EXT_END"
+#define DYNEXTBASE  "_DYNEXT_BEG"
+#define DYNEXTEND   "_DYNEXT_END"
+#define IVAEXTMEMBASE   "_IVAEXTMEM_BEG"
+#define IVAEXTMEMEND   "_IVAEXTMEM_END"
+
+#define DSP_TRACESEC_BEG  "_BRIDGE_TRACE_BEG"
+#define DSP_TRACESEC_END  "_BRIDGE_TRACE_END"
+
+#define SYS_PUTCBEG               "_SYS_PUTCBEG"
+#define SYS_PUTCEND               "_SYS_PUTCEND"
+#define BRIDGE_SYS_PUTC_CURRENT   "_BRIDGE_SYS_PUTC_current"
+
+#define WORDSWAP_ENABLE 0x3    /* Enable word swap */
+
+/*
+ *  ======== read_ext_dsp_data ========
+ *  Reads data from DSP external memory. The external memory for the DSP is
+ *  configured by the combination of DSP MMU and shm memory manager in the CDB.
+ */
+extern int read_ext_dsp_data(struct bridge_dev_context *dev_context,
+                                   OUT u8 *pbHostBuf, u32 dwDSPAddr,
+                                   u32 ul_num_bytes, u32 ulMemType);
+
+/*
+ *  ======== write_dsp_data ========
+ */
+extern int write_dsp_data(struct bridge_dev_context *dev_context,
+                                IN u8 *pbHostBuf, u32 dwDSPAddr,
+                                u32 ul_num_bytes, u32 ulMemType);
+
+/*
+ *  ======== write_ext_dsp_data ========
+ *  Writes to the DSP External memory for external program.
+ *  The ext mem for the program is configured by the combination of DSP MMU and
+ *  shm Memory manager in the CDB
+ */
+extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
+                                    IN u8 *pbHostBuf, u32 dwDSPAddr,
+                                    u32 ul_num_bytes, u32 ulMemType,
+                                    bool bDynamicLoad);
+
+/*
+ * ======== write_ext32_bit_dsp_data ========
+ * Writes 32 bit data to the external memory
+ */
+static inline void write_ext32_bit_dsp_data(IN const
+                                       struct bridge_dev_context *dev_context,
+                                       IN u32 dwDSPAddr, IN u32 val)
+{
+       *(u32 *) dwDSPAddr = ((dev_context->tc_word_swap_on) ? (((val << 16) &
+                                                                0xFFFF0000) |
+                                                               ((val >> 16) &
+                                                                0x0000FFFF)) :
+                             val);
+}
+
+/*
+ * ======== read_ext32_bit_dsp_data ========
+ * Reads 32 bit data from the external memory
+ */
+static inline u32 read_ext32_bit_dsp_data(IN const struct bridge_dev_context
+                                         *dev_context, IN u32 dwDSPAddr)
+{
+       u32 ret;
+       ret = *(u32 *) dwDSPAddr;
+
+       ret = ((dev_context->tc_word_swap_on) ? (((ret << 16)
+                                                 & 0xFFFF0000) | ((ret >> 16) &
+                                                                  0x0000FFFF))
+              : ret);
+       return ret;
+}
+
+#endif /* _TIOMAP_IO_ */
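
The two inline helpers above implement the tc_word_swap_on path by
exchanging the 16-bit halves of each 32-bit value, so applying the same
transform on write and on read restores the original data. A quick
user-space check of that self-inverse property (hypothetical harness, not
part of the patch):

#include <assert.h>
#include <stdint.h>

/* The same halfword exchange used by write/read_ext32_bit_dsp_data */
static uint32_t halfword_swap(uint32_t val)
{
	return ((val << 16) & 0xFFFF0000) | ((val >> 16) & 0x0000FFFF);
}

int main(void)
{
	uint32_t v = 0x12345678;

	assert(halfword_swap(v) == 0x56781234);
	assert(halfword_swap(halfword_swap(v)) == v);	/* self-inverse */
	return 0;
}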
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
new file mode 100644 (file)
index 0000000..64e9366
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * ue_deh.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implements upper edge DSP exception handling (DEH) functions.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/clk.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/drv.h>
+
+/*  ----------------------------------- Link Driver */
+#include <dspbridge/dspdeh.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/dspapi.h>
+#include <dspbridge/wdt.h>
+
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
+/*  ----------------------------------- This */
+#include "mmu_fault.h"
+#include "_tiomap.h"
+#include "_deh.h"
+#include "_tiomap_pwr.h"
+#include <dspbridge/io_sm.h>
+
+
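+/*
+ * MMU map attributes used when programming a TLB entry for the dummy
+ * page that backs a faulting DSP address (see bridge_deh_notify).
+ */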
+static struct hw_mmu_map_attrs_t map_attrs = { HW_LITTLE_ENDIAN,
+       HW_ELEM_SIZE16BIT,
+       HW_MMU_CPUES
+};
+
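+/*
+ * Scratch page mapped over the faulting DSP address so the DSP can keep
+ * running long enough to dump its state after an MMU fault.
+ */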
+static void *dummy_va_addr;
+
+int bridge_deh_create(struct deh_mgr **ret_deh_mgr,
+               struct dev_object *hdev_obj)
+{
+       int status = 0;
+       struct deh_mgr *deh_mgr;
+       struct bridge_dev_context *hbridge_context = NULL;
+
+       /* Get Bridge context info. */
+       dev_get_bridge_context(hdev_obj, &hbridge_context);
+       DBC_ASSERT(hbridge_context);
+       dummy_va_addr = NULL;
+       /* Allocate DEH manager object */
+       deh_mgr = kzalloc(sizeof(struct deh_mgr), GFP_KERNEL);
+       if (!deh_mgr) {
+               status = -ENOMEM;
+               goto leave;
+       }
+
+       /* Create an NTFY object to manage notifications */
+       deh_mgr->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
+       if (deh_mgr->ntfy_obj) {
+               ntfy_init(deh_mgr->ntfy_obj);
+       } else {
+               status = -ENOMEM;
+               goto err;
+       }
+
+       /* Create the MMU fault DPC (tasklet) */
+       tasklet_init(&deh_mgr->dpc_tasklet, mmu_fault_dpc, (unsigned long) deh_mgr);
+
+       /* Fill in context structure */
+       deh_mgr->hbridge_context = hbridge_context;
+       deh_mgr->err_info.dw_err_mask = 0L;
+       deh_mgr->err_info.dw_val1 = 0L;
+       deh_mgr->err_info.dw_val2 = 0L;
+       deh_mgr->err_info.dw_val3 = 0L;
+
+       /* Install ISR function for DSP MMU fault */
+       status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
+                               "DspBridge\tiommu fault", (void *)deh_mgr);
+       if (status < 0)
+               status = -EPERM;
+
+err:
+       if (DSP_FAILED(status)) {
+               /* If create failed, cleanup */
+               bridge_deh_destroy(deh_mgr);
+               deh_mgr = NULL;
+       }
+leave:
+       *ret_deh_mgr = deh_mgr;
+
+       return status;
+}
+
+int bridge_deh_destroy(struct deh_mgr *deh_mgr)
+{
+       if (!deh_mgr)
+               return -EFAULT;
+
+       /* Release dummy VA buffer */
+       bridge_deh_release_dummy_mem();
+       /* If notification object exists, delete it */
+       if (deh_mgr->ntfy_obj) {
+               ntfy_delete(deh_mgr->ntfy_obj);
+               kfree(deh_mgr->ntfy_obj);
+       }
+       /* Disable DSP MMU fault */
+       free_irq(INT_DSP_MMU_IRQ, deh_mgr);
+
+       /* Free DPC object */
+       tasklet_kill(&deh_mgr->dpc_tasklet);
+
+       /* Deallocate the DEH manager object */
+       kfree(deh_mgr);
+
+       return 0;
+}
+
+int bridge_deh_register_notify(struct deh_mgr *deh_mgr, u32 event_mask,
+               u32 notify_type,
+               struct dsp_notification *hnotification)
+{
+       int status = 0;
+
+       if (!deh_mgr)
+               return -EFAULT;
+
+       if (event_mask)
+               status = ntfy_register(deh_mgr->ntfy_obj, hnotification,
+                                       event_mask, notify_type);
+       else
+               status = ntfy_unregister(deh_mgr->ntfy_obj, hnotification);
+
+       return status;
+}
+
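+/*
+ * Central exception sink: called from the MMU fault DPC, the WDT3 DPC
+ * and the IO dispatcher to record the error, notify registered clients
+ * and put the board into the ERROR state.
+ */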
+void bridge_deh_notify(struct deh_mgr *deh_mgr, u32 ulEventMask, u32 dwErrInfo)
+{
+       struct bridge_dev_context *dev_context;
+       int status = 0;
+       u32 hw_mmu_max_tlb_count = 31;
+       struct cfg_hostres *resources;
+       hw_status hw_status_obj;
+
+       if (!deh_mgr)
+               return;
+
+       dev_info(bridge, "%s: device exception\n", __func__);
+       dev_context = (struct bridge_dev_context *)deh_mgr->hbridge_context;
+       resources = dev_context->resources;
+
+       switch (ulEventMask) {
+       case DSP_SYSERROR:
+               /* reset err_info structure before use */
+               deh_mgr->err_info.dw_err_mask = DSP_SYSERROR;
+               deh_mgr->err_info.dw_val1 = dwErrInfo;
+               deh_mgr->err_info.dw_val2 = 0L;
+               deh_mgr->err_info.dw_val3 = 0L;
+               dev_err(bridge, "%s: %s, err_info = 0x%x\n",
+                               __func__, "DSP_SYSERROR", dwErrInfo);
+               dump_dl_modules(dev_context);
+               dump_dsp_stack(dev_context);
+               break;
+       case DSP_MMUFAULT:
+               /* MMU fault routine should have set err info structure. */
+               deh_mgr->err_info.dw_err_mask = DSP_MMUFAULT;
+               dev_err(bridge, "%s: %s, err_info = 0x%x\n",
+                               __func__, "DSP_MMUFAULT", dwErrInfo);
+               dev_info(bridge, "%s: %s, high=0x%x, low=0x%x, "
+                       "fault=0x%x\n", __func__, "DSP_MMUFAULT",
+                       (unsigned int) deh_mgr->err_info.dw_val1,
+                       (unsigned int) deh_mgr->err_info.dw_val2,
+                       (unsigned int) fault_addr);
+               dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
+
+               print_dsp_trace_buffer(dev_context);
+               dump_dl_modules(dev_context);
+
+               /*
+                * Reset the dynamic MMU TLB index back to the fixed-entry
+                * count if it exceeds 31, so the dynamic index always stays
+                * within the range between the fixed entries and 31.
+                */
+               if (dev_context->num_tlb_entries >
+                               hw_mmu_max_tlb_count) {
+                       dev_context->num_tlb_entries =
+                               dev_context->fixed_tlb_entries;
+               }
+               if (DSP_SUCCEEDED(status)) {
+                       hw_status_obj =
+                               hw_mmu_tlb_add(resources->dw_dmmu_base,
+                                               virt_to_phys(dummy_va_addr), fault_addr,
+                                               HW_PAGE_SIZE4KB, 1,
+                                               &map_attrs, HW_SET, HW_SET);
+               }
+
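+               /*
+                * Kick GPT8 and wait for it to overflow: this gives the
+                * DSP-side exception handler time to run and dump its state
+                * before the fault is acknowledged below.
+                */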
+               dsp_clk_enable(DSP_CLK_GPT8);
+
+               dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+               /* Clear MMU interrupt */
+               hw_mmu_event_ack(resources->dw_dmmu_base,
+                               HW_MMU_TRANSLATION_FAULT);
+               dump_dsp_stack(deh_mgr->hbridge_context);
+               dsp_clk_disable(DSP_CLK_GPT8);
+               break;
+#ifdef CONFIG_BRIDGE_NTFY_PWRERR
+       case DSP_PWRERROR:
+               /* reset err_info structure before use */
+               deh_mgr->err_info.dw_err_mask = DSP_PWRERROR;
+               deh_mgr->err_info.dw_val1 = dwErrInfo;
+               deh_mgr->err_info.dw_val2 = 0L;
+               deh_mgr->err_info.dw_val3 = 0L;
+               dev_err(bridge, "%s: %s, err_info = 0x%x\n",
+                               __func__, "DSP_PWRERROR", dwErrInfo);
+               break;
+#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
+       case DSP_WDTOVERFLOW:
+               deh_mgr->err_info.dw_err_mask = DSP_WDTOVERFLOW;
+               deh_mgr->err_info.dw_val1 = 0L;
+               deh_mgr->err_info.dw_val2 = 0L;
+               deh_mgr->err_info.dw_val3 = 0L;
+               dev_err(bridge, "%s: DSP_WDTOVERFLOW\n", __func__);
+               break;
+       default:
+               dev_dbg(bridge, "%s: Unknown Error, err_info = 0x%x\n",
+                               __func__, dwErrInfo);
+               break;
+       }
+
+       /* Filter subsequent notifications when an error occurs */
+       if (dev_context->dw_brd_state != BRD_ERROR) {
+               ntfy_notify(deh_mgr->ntfy_obj, ulEventMask);
+#ifdef CONFIG_BRIDGE_RECOVERY
+               bridge_recover_schedule();
+#endif
+       }
+
+       /* Set the Board state as ERROR */
+       dev_context->dw_brd_state = BRD_ERROR;
+       /* Disable all the clocks that were enabled by DSP */
+       dsp_clock_disable_all(dev_context->dsp_per_clks);
+       /*
+        * Disable the WDT so it does not fire again once one overflow,
+        * or any fatal error, has been handled.
+        */
+       dsp_wdt_enable(false);
+}
+
+int bridge_deh_get_info(struct deh_mgr *deh_mgr,
+               struct dsp_errorinfo *pErrInfo)
+{
+       DBC_REQUIRE(deh_mgr);
+       DBC_REQUIRE(pErrInfo);
+
+       if (!deh_mgr)
+               return -EFAULT;
+
+       /* Copy DEH error info structure to PROC error info structure. */
+       pErrInfo->dw_err_mask = deh_mgr->err_info.dw_err_mask;
+       pErrInfo->dw_val1 = deh_mgr->err_info.dw_val1;
+       pErrInfo->dw_val2 = deh_mgr->err_info.dw_val2;
+       pErrInfo->dw_val3 = deh_mgr->err_info.dw_val3;
+
+       return 0;
+}
+
+void bridge_deh_release_dummy_mem(void)
+{
+       free_page((unsigned long)dummy_va_addr);
+       dummy_va_addr = NULL;
+}
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
new file mode 100644 (file)
index 0000000..5881fe0
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * wdt.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Watchdog timer (WDT3) support functions for the DSP Bridge driver.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+#include <dspbridge/dspdeh.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/_chnl_sm.h>
+#include <dspbridge/wdt.h>
+#include <dspbridge/host_os.h>
+
+
+#ifdef CONFIG_BRIDGE_WDT3
+
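+/* WDT3 module lives in the L4-PER interconnect space on OMAP34xx */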
+#define OMAP34XX_WDT3_BASE             (L4_PER_34XX_BASE + 0x30000)
+
+static struct dsp_wdt_setting dsp_wdt;
+
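+/* Tasklet body: report a DSP watchdog overflow to the DEH manager */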
+void dsp_wdt_dpc(unsigned long data)
+{
+       struct deh_mgr *deh_mgr;
+       dev_get_deh_mgr(dev_get_first(), &deh_mgr);
+       if (deh_mgr)
+               bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
+}
+
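+/*
+ * WDT3 ISR: acknowledge the interrupt in hardware and defer the DEH
+ * notification to the tasklet.
+ */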
+irqreturn_t dsp_wdt_isr(int irq, void *data)
+{
+       u32 value;
+       /* ack wdt3 interrupt */
+       value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+       __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+
+       tasklet_schedule(&dsp_wdt.wdt3_tasklet);
+       return IRQ_HANDLED;
+}
+
+int dsp_wdt_init(void)
+{
+       int ret = 0;
+
+       dsp_wdt.sm_wdt = NULL;
+       dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
+       tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
+
+       /* clk_get() returns an ERR_PTR on failure, never NULL */
+       dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
+
+       if (!IS_ERR(dsp_wdt.fclk)) {
+               dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
+               if (IS_ERR(dsp_wdt.iclk)) {
+                       dsp_wdt.iclk = NULL;
+                       clk_put(dsp_wdt.fclk);
+                       dsp_wdt.fclk = NULL;
+                       ret = -EFAULT;
+               }
+       } else {
+               dsp_wdt.fclk = NULL;
+               ret = -EFAULT;
+       }
+
+       if (!ret)
+               ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
+                                                       "dsp_wdt", &dsp_wdt);
+
+       /* Disable at this moment, it will be enabled when DSP starts */
+       if (!ret)
+               disable_irq(INT_34XX_WDT3_IRQ);
+
+       return ret;
+}
+
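+/*
+ * Record the shared-memory WDT control area and seed the DSP-visible
+ * overflow timeout; must be called before dsp_wdt_enable(true).
+ */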
+void dsp_wdt_sm_set(void *data)
+{
+       dsp_wdt.sm_wdt = data;
+       dsp_wdt.sm_wdt->wdt_overflow = CONFIG_WDT_TIMEOUT;
+}
+
+
+void dsp_wdt_exit(void)
+{
+       free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
+       tasklet_kill(&dsp_wdt.wdt3_tasklet);
+
+       if (dsp_wdt.fclk)
+               clk_put(dsp_wdt.fclk);
+       if (dsp_wdt.iclk)
+               clk_put(dsp_wdt.iclk);
+
+       dsp_wdt.fclk = NULL;
+       dsp_wdt.iclk = NULL;
+       dsp_wdt.sm_wdt = NULL;
+       dsp_wdt.reg_base = NULL;
+}
+
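+/*
+ * Gate the WDT3 clocks and IRQ; the static flag keeps unbalanced
+ * enable/disable requests from reaching the clock framework. Assumes
+ * dsp_wdt_sm_set() has run, since the shared-memory area is written here.
+ */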
+void dsp_wdt_enable(bool enable)
+{
+       u32 tmp;
+       static bool wdt_enable;
+
+       if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
+               return;
+
+       wdt_enable = enable;
+
+       if (enable) {
+               clk_enable(dsp_wdt.fclk);
+               clk_enable(dsp_wdt.iclk);
+               dsp_wdt.sm_wdt->wdt_setclocks = 1;
+               tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+               __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
+               enable_irq(INT_34XX_WDT3_IRQ);
+       } else {
+               disable_irq(INT_34XX_WDT3_IRQ);
+               dsp_wdt.sm_wdt->wdt_setclocks = 0;
+               clk_disable(dsp_wdt.iclk);
+               clk_disable(dsp_wdt.fclk);
+       }
+}
+
+#else
+void dsp_wdt_enable(bool enable)
+{
+}
+
+void dsp_wdt_sm_set(void *data)
+{
+}
+
+int dsp_wdt_init(void)
+{
+       return 0;
+}
+
+void dsp_wdt_exit(void)
+{
+}
+#endif
+