drivers/staging/tidspbridge/core/tiomap3430.c
1 /*
2  * tiomap.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Processor Manager Driver for TI OMAP3430 EVM.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 #include <plat/dsp.h>
20
21 #include <linux/types.h>
22 /*  ----------------------------------- Host OS */
23 #include <dspbridge/host_os.h>
24 #include <linux/mm.h>
25 #include <linux/mmzone.h>
26
27 /*  ----------------------------------- DSP/BIOS Bridge */
28 #include <dspbridge/dbdefs.h>
29
30 /*  ----------------------------------- Trace & Debug */
31 #include <dspbridge/dbc.h>
32
33 /*  ----------------------------------- OS Adaptation Layer */
34 #include <dspbridge/drv.h>
35 #include <dspbridge/sync.h>
36
37 /* ------------------------------------ Hardware Abstraction Layer */
38 #include <hw_defs.h>
39 #include <hw_mmu.h>
40
41 /*  ----------------------------------- Link Driver */
42 #include <dspbridge/dspdefs.h>
43 #include <dspbridge/dspchnl.h>
44 #include <dspbridge/dspdeh.h>
45 #include <dspbridge/dspio.h>
46 #include <dspbridge/dspmsg.h>
47 #include <dspbridge/pwr.h>
48 #include <dspbridge/io_sm.h>
49
50 /*  ----------------------------------- Platform Manager */
51 #include <dspbridge/dev.h>
52 #include <dspbridge/dspapi.h>
53 #include <dspbridge/dmm.h>
54 #include <dspbridge/wdt.h>
55
56 /*  ----------------------------------- Local */
57 #include "_tiomap.h"
58 #include "_tiomap_pwr.h"
59 #include "tiomap_io.h"
60
61 /* Offset in shared mem to write to in order to synchronize start with DSP */
62 #define SHMSYNCOFFSET 4         /* GPP byte offset */
63
64 #define BUFFERSIZE 1024         /* Chunk size of the bounce buffer used by mem_copy/mem_write */
65
66 #define TIHELEN_ACKTIMEOUT  10000
67
68 #define MMU_SECTION_ADDR_MASK    0xFFF00000
69 #define MMU_SSECTION_ADDR_MASK   0xFF000000
70 #define MMU_LARGE_PAGE_MASK      0xFFFF0000
71 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
72 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
73 #define PAGES_II_LVL_TABLE   512
74 #define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
75
76 /*
77  * This is a totally ugly layer violation, but needed until
78  * omap_ctrl_set_dsp_boot*() are provided.
79  */
80 #define OMAP3_IVA2_BOOTMOD_IDLE 1
81 #define OMAP2_CONTROL_GENERAL 0x270
82 #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
83 #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
84
85 #define OMAP343X_CTRL_REGADDR(reg) \
86         OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
87
88
89 /* Forward Declarations: */
90 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
91 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
92                                   u8 *host_buff,
93                                   u32 dsp_addr, u32 ul_num_bytes,
94                                   u32 mem_type);
95 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
96                                    u32 dsp_addr);
97 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
98                                     int *board_state);
99 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
100 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
101                                    u8 *host_buff,
102                                    u32 dsp_addr, u32 ul_num_bytes,
103                                    u32 mem_type);
104 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
105                                     u32 brd_state);
106 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
107                                    u32 dsp_dest_addr, u32 dsp_src_addr,
108                                    u32 ul_num_bytes, u32 mem_type);
109 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
110                                     u8 *host_buff, u32 dsp_addr,
111                                     u32 ul_num_bytes, u32 mem_type);
112 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
113                                   u32 ul_mpu_addr, u32 virt_addr,
114                                   u32 ul_num_bytes, u32 ul_map_attr,
115                                   struct page **mapped_pages);
116 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
117                                      u32 virt_addr, u32 ul_num_bytes);
118 static int bridge_dev_create(struct bridge_dev_context
119                                         **dev_cntxt,
120                                         struct dev_object *hdev_obj,
121                                         struct cfg_hostres *config_param);
122 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
123                                   u32 dw_cmd, void *pargs);
124 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
125 static u32 user_va2_pa(struct mm_struct *mm, u32 address);
126 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
127                              u32 va, u32 size,
128                              struct hw_mmu_map_attrs_t *map_attrs);
129 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
130                           u32 size, struct hw_mmu_map_attrs_t *attrs);
131 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
132                                   u32 ul_mpu_addr, u32 virt_addr,
133                                   u32 ul_num_bytes,
134                                   struct hw_mmu_map_attrs_t *hw_attrs);
135
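/*
 * Waits for the DSP to clear the sync word at dw_sync_addr; returns false
 * on timeout. Used by bridge_brd_start() after releasing the DSP from reset.
 */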
136 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
137
138 /*  ----------------------------------- Globals */
139
140 /* Attributes of L2 page tables for DSP MMU */
141 struct page_info {
142         u32 num_entries;        /* Number of valid PTEs in the L2 PT */
143 };
144
145 /* Attributes used to manage the DSP MMU page tables */
146 struct pg_table_attrs {
147         spinlock_t pg_lock;     /* Critical section object handle */
148
149         u32 l1_base_pa;         /* Physical address of the L1 PT */
150         u32 l1_base_va;         /* Virtual  address of the L1 PT */
151         u32 l1_size;            /* Size of the L1 PT */
152         u32 l1_tbl_alloc_pa;
153         /* Physical address of Allocated mem for L1 table. May not be aligned */
154         u32 l1_tbl_alloc_va;
155         /* Virtual address of Allocated mem for L1 table. May not be aligned */
156         u32 l1_tbl_alloc_sz;
157         /* Size of consistent memory allocated for L1 table.
158          * May not be aligned */
159
160         u32 l2_base_pa;         /* Physical address of the L2 PT */
161         u32 l2_base_va;         /* Virtual  address of the L2 PT */
162         u32 l2_size;            /* Size of the L2 PT */
163         u32 l2_tbl_alloc_pa;
164         /* Physical address of Allocated mem for L2 table. May not be aligned */
165         u32 l2_tbl_alloc_va;
166         /* Virtual address of Allocated mem for L2 table. May not be aligned */
167         u32 l2_tbl_alloc_sz;
168         /* Size of consistent memory allocated for L2 table.
169          * May not be aligned */
170
171         u32 l2_num_pages;       /* Number of allocated L2 PT */
172         /* Array [l2_num_pages] of L2 PT info structs */
173         struct page_info *pg_info;
174 };
175
176 /*
177  *  This Bridge driver's function interface table.
178  */
179 static struct bridge_drv_interface drv_interface_fxns = {
180         /* Bridge API ver. for which this bridge driver is built. */
181         BRD_API_MAJOR_VERSION,
182         BRD_API_MINOR_VERSION,
183         bridge_dev_create,
184         bridge_dev_destroy,
185         bridge_dev_ctrl,
186         bridge_brd_monitor,
187         bridge_brd_start,
188         bridge_brd_stop,
189         bridge_brd_status,
190         bridge_brd_read,
191         bridge_brd_write,
192         bridge_brd_set_state,
193         bridge_brd_mem_copy,
194         bridge_brd_mem_write,
195         bridge_brd_mem_map,
196         bridge_brd_mem_un_map,
197         /* The following CHNL functions are provided by chnl_io.lib: */
198         bridge_chnl_create,
199         bridge_chnl_destroy,
200         bridge_chnl_open,
201         bridge_chnl_close,
202         bridge_chnl_add_io_req,
203         bridge_chnl_get_ioc,
204         bridge_chnl_cancel_io,
205         bridge_chnl_flush_io,
206         bridge_chnl_get_info,
207         bridge_chnl_get_mgr_info,
208         bridge_chnl_idle,
209         bridge_chnl_register_notify,
210         /* The following IO functions are provided by chnl_io.lib: */
211         bridge_io_create,
212         bridge_io_destroy,
213         bridge_io_on_loaded,
214         bridge_io_get_proc_load,
215         /* The following msg_ctrl functions are provided by chnl_io.lib: */
216         bridge_msg_create,
217         bridge_msg_create_queue,
218         bridge_msg_delete,
219         bridge_msg_delete_queue,
220         bridge_msg_get,
221         bridge_msg_put,
222         bridge_msg_register_notify,
223         bridge_msg_set_queue_id,
224 };
225
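/*
 * Notifier through which the mailbox framework delivers incoming DSP
 * mailbox messages to io_mbox_msg().
 */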
226 static struct notifier_block dsp_mbox_notifier = {
227         .notifier_call = io_mbox_msg,
228 };
229
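/* Wake the DSP if it is hibernating, then flush all DSP MMU TLB entries */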
230 static inline void flush_all(struct bridge_dev_context *dev_context)
231 {
232         if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
233             dev_context->dw_brd_state == BRD_HIBERNATION)
234                 wake_dsp(dev_context, NULL);
235
236         hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
237 }
238
239 static void bad_page_dump(u32 pa, struct page *pg)
240 {
241         pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
242         pr_emerg("Bad page state in process '%s'\n"
243                  "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
244                  "Backtrace:\n",
245                  current->comm, pg, (int)(2 * sizeof(unsigned long)),
246                  (unsigned long)pg->flags, pg->mapping,
247                  page_mapcount(pg), page_count(pg));
248         dump_stack();
249 }
250
251 /*
252  *  ======== bridge_drv_entry ========
253  *  purpose:
254  *      Bridge Driver entry point.
255  */
256 void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
257                    const char *driver_file_name)
258 {
259
260         DBC_REQUIRE(driver_file_name != NULL);
261
262         io_sm_init();           /* Initialization of io_sm module */
263
264         if (strcmp(driver_file_name, "UMA") == 0)
265                 *drv_intf = &drv_interface_fxns;
266         else
267                 dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
268
269 }
270
271 /*
272  *  ======== bridge_brd_monitor ========
273  *  purpose:
274  *      Puts the DSP into a loadable state, i.e. the application can
275  *      load and start the device.
276  *
277  *  Preconditions:
278  *      Device in 'OFF' state.
279  */
280 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
281 {
282         struct bridge_dev_context *dev_context = dev_ctxt;
283         u32 temp;
284         struct omap_dsp_platform_data *pdata =
285                 omap_dspbridge_dev->dev.platform_data;
286
287         temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
288                                         OMAP_POWERSTATEST_MASK;
289         if (!(temp & 0x02)) {
290                 /* IVA2 is not in ON state */
291                 /* Read and set PM_PWSTCTRL_IVA2  to ON */
292                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
293                         PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
294                 /* Set the SW supervised state transition */
295                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
296                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
297
298                 /* Wait until the state has moved to ON */
299                 while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
300                                                 OMAP_INTRANSITION_MASK)
301                         ;
302                 /* Disable Automatic transition */
303                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
304                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
305         }
306         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
307                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
308         dsp_clk_enable(DSP_CLK_IVA2);
309
310         /* set the device state to IDLE */
311         dev_context->dw_brd_state = BRD_IDLE;
312
313         return 0;
314 }
315
316 /*
317  *  ======== bridge_brd_read ========
318  *  purpose:
319  *      Reads data from DSP memory into a host buffer.
320  */
321 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
322                                   u8 *host_buff, u32 dsp_addr,
323                                   u32 ul_num_bytes, u32 mem_type)
324 {
325         int status = 0;
326         struct bridge_dev_context *dev_context = dev_ctxt;
327         u32 offset;
328         u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
329
330         if (dsp_addr < dev_context->dw_dsp_start_add) {
331                 status = -EPERM;
332                 return status;
333         }
334         /* Account for the 3 banks of the DSP internal memory */
335         if ((dsp_addr - dev_context->dw_dsp_start_add) <
336             dev_context->dw_internal_size) {
337                 offset = dsp_addr - dev_context->dw_dsp_start_add;
338         } else {
339                 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
340                                            ul_num_bytes, mem_type);
341                 return status;
342         }
343         /* Copy the data from DSP internal memory */
344         memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
345         return status;
346 }
347
348 /*
349  *  ======== bridge_brd_set_state ========
350  *  purpose:
351  *      This routine updates the Board status.
352  */
353 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
354                                     u32 brd_state)
355 {
356         int status = 0;
357         struct bridge_dev_context *dev_context = dev_ctxt;
358
359         dev_context->dw_brd_state = brd_state;
360         return status;
361 }
362
363 /*
364  *  ======== bridge_brd_start ========
365  *  purpose:
366  *      Initializes DSP MMU and Starts DSP.
367  *
368  *  Preconditions:
369  *  a) DSP domain is 'ACTIVE'.
370  *  b) DSP_RST1 is asserted.
371  *  c) DSP_RST2 is released.
372  */
373 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
374                                    u32 dsp_addr)
375 {
376         int status = 0;
377         struct bridge_dev_context *dev_context = dev_ctxt;
378         u32 dw_sync_addr = 0;
379         u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
380         u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
381         u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
382         /* Offset of shm_base_virt from tlb_base_virt */
383         u32 ul_shm_offset_virt;
384         s32 entry_ndx;
385         s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
386         struct cfg_hostres *resources = NULL;
387         u32 temp;
388         u32 ul_dsp_clk_rate;
389         u32 ul_dsp_clk_addr;
390         u32 ul_bios_gp_timer;
391         u32 clk_cmd;
392         struct io_mgr *hio_mgr;
393         u32 ul_load_monitor_timer;
394         struct omap_dsp_platform_data *pdata =
395                 omap_dspbridge_dev->dev.platform_data;
396
397         /* The device context contains all the mmu setup info from when the
398          * last dsp base image was loaded. The first entry is always
399          * SHMMEM base. */
400         /* Get SHM_BEG - convert to byte address */
401         (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
402                              &ul_shm_base_virt);
403         ul_shm_base_virt *= DSPWORDSIZE;
404         DBC_ASSERT(ul_shm_base_virt != 0);
405         /* DSP Virtual address */
406         ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
407         DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
408         ul_shm_offset_virt =
409             ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
410         /* Kernel logical address */
411         ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
412
413         DBC_ASSERT(ul_shm_base != 0);
414         /* The 2nd word is used as the sync field */
415         dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
416         /* Write a signature into the shm base + offset; this will
417          * get cleared when the DSP program starts. */
418         if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
419                 pr_err("%s: Illegal SM base\n", __func__);
420                 status = -EPERM;
421         } else
422                 __raw_writel(0xffffffff, dw_sync_addr);
423
424         if (!status) {
425                 resources = dev_context->resources;
426                 if (!resources)
427                         status = -EPERM;
428
429                 /* Assert RST1, i.e. reset only the DSP megacell */
430                 if (!status) {
431                         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
432                                         OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
433                                         OMAP2_RM_RSTCTRL);
434                         /* Mask address with 1K for compatibility */
435                         __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
436                                         OMAP343X_CTRL_REGADDR(
437                                         OMAP343X_CONTROL_IVA2_BOOTADDR));
438                         /*
439                          * Set bootmode to self loop if dsp_debug flag is true
440                          */
441                         __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
442                                         OMAP343X_CTRL_REGADDR(
443                                         OMAP343X_CONTROL_IVA2_BOOTMOD));
444                 }
445         }
446         if (!status) {
447                 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
448                  * IVA2 SYSC register */
449                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
450                         OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
451                 udelay(100);
452                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
453                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
454                 udelay(100);
455
456                 /* Disable the DSP MMU */
457                 hw_mmu_disable(resources->dw_dmmu_base);
458                 /* Disable the table walking logic (TWL) */
459                 hw_mmu_twl_disable(resources->dw_dmmu_base);
460
461                 /* Only make TLB entry if both addresses are non-zero */
462                 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
463                      entry_ndx++) {
464                         struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
465                         struct hw_mmu_map_attrs_t map_attrs = {
466                                 .endianism = e->endianism,
467                                 .element_size = e->elem_size,
468                                 .mixed_size = e->mixed_mode,
469                         };
470
471                         if (!e->ul_gpp_pa || !e->ul_dsp_va)
472                                 continue;
473
474                         dev_dbg(bridge,
475                                         "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
476                                         itmp_entry_ndx,
477                                         e->ul_gpp_pa,
478                                         e->ul_dsp_va,
479                                         e->ul_size);
480
481                         hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
482                                         e->ul_gpp_pa,
483                                         e->ul_dsp_va,
484                                         e->ul_size,
485                                         itmp_entry_ndx,
486                                         &map_attrs, 1, 1);
487
488                         itmp_entry_ndx++;
489                 }
490         }
491
492         /* Lock the above TLB entries and get the BIOS and load monitor timer
493          * information */
494         if (!status) {
495                 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
496                 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
497                 hw_mmu_ttb_set(resources->dw_dmmu_base,
498                                dev_context->pt_attrs->l1_base_pa);
499                 hw_mmu_twl_enable(resources->dw_dmmu_base);
500                 /* Enable the SmartIdle and AutoIdle bits in MMU_SYSCONFIG */
501
502                 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
503                 temp = (temp & 0xFFFFFFEF) | 0x11;
504                 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
505
506                 /* Let the DSP MMU run */
507                 hw_mmu_enable(resources->dw_dmmu_base);
508
509                 /* Enable the BIOS clock */
510                 (void)dev_get_symbol(dev_context->hdev_obj,
511                                      BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
512                 (void)dev_get_symbol(dev_context->hdev_obj,
513                                      BRIDGEINIT_LOADMON_GPTIMER,
514                                      &ul_load_monitor_timer);
515         }
516
517         if (!status) {
518                 if (ul_load_monitor_timer != 0xFFFF) {
519                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
520                             ul_load_monitor_timer;
521                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
522                 } else {
523                         dev_dbg(bridge, "Not able to get the symbol for Load "
524                                 "Monitor Timer\n");
525                 }
526         }
527
528         if (!status) {
529                 if (ul_bios_gp_timer != 0xFFFF) {
530                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
531                             ul_bios_gp_timer;
532                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
533                 } else {
534                         dev_dbg(bridge,
535                                 "Not able to get the symbol for BIOS Timer\n");
536                 }
537         }
538
539         if (!status) {
540                 /* Set the DSP clock rate */
541                 (void)dev_get_symbol(dev_context->hdev_obj,
542                                      "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
543                 /* Set Autoidle mode for the IVA2 PLL */
544                 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
545                                 OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
546
547                 if ((unsigned int *)ul_dsp_clk_addr != NULL) {
548                         /* Get the clock rate */
549                         ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
550                         dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
551                                 __func__, ul_dsp_clk_rate);
552                         (void)bridge_brd_write(dev_context,
553                                                (u8 *) &ul_dsp_clk_rate,
554                                                ul_dsp_clk_addr, sizeof(u32), 0);
555                 }
556                 /*
557                  * Enable Mailbox events and also drain any pending
558                  * stale messages.
559                  */
560                 dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
561                 if (IS_ERR(dev_context->mbox)) {
562                         dev_context->mbox = NULL;
563                         pr_err("%s: Failed to get dsp mailbox handle\n",
564                                                                 __func__);
565                         status = -EPERM;
566                 }
567
568         }
569         if (!status) {
570 /*PM_IVA2GRPSEL_PER = 0xC0;*/
571                 temp = readl(resources->dw_per_pm_base + 0xA8);
572                 temp = (temp & 0xFFFFFF30) | 0xC0;
573                 writel(temp, resources->dw_per_pm_base + 0xA8);
574
575 /*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
576                 temp = readl(resources->dw_per_pm_base + 0xA4);
577                 temp = (temp & 0xFFFFFF3F);
578                 writel(temp, resources->dw_per_pm_base + 0xA4);
579 /*CM_SLEEPDEP_PER |= 0x04; */
580                 temp = readl(resources->dw_per_base + 0x44);
581                 temp = (temp & 0xFFFFFFFB) | 0x04;
582                 writel(temp, resources->dw_per_base + 0x44);
583
584 /*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
585                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
586                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
587
588                 /* Let DSP go */
589                 dev_dbg(bridge, "%s Unreset\n", __func__);
590                 /* Enable DSP MMU Interrupts */
591                 hw_mmu_event_enable(resources->dw_dmmu_base,
592                                     HW_MMU_ALL_INTERRUPTS);
593                 /* release the RST1, DSP starts executing now .. */
594                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
595                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
596
597                 dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
598                 dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
599                 if (dsp_debug)
600                         while (__raw_readw(dw_sync_addr))
601                                 ;
602
603                 /* Wait for DSP to clear word in shared memory */
604                 /* Read the Location */
605                 if (!wait_for_start(dev_context, dw_sync_addr))
606                         status = -ETIMEDOUT;
607
608                 /* Start wdt */
609                 dsp_wdt_sm_set((void *)ul_shm_base);
610                 dsp_wdt_enable(true);
611
612                 status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
613                 if (hio_mgr) {
614                         io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
615                         /* Write the synchronization bit to indicate the
616                          * completion of OPP table update to DSP
617                          */
618                         __raw_writel(0XCAFECAFE, dw_sync_addr);
619
620                         /* update board state */
621                         dev_context->dw_brd_state = BRD_RUNNING;
622                         /* (void)chnlsm_enable_interrupt(dev_context); */
623                 } else {
624                         dev_context->dw_brd_state = BRD_UNKNOWN;
625                 }
626         }
627         return status;
628 }
629
630 /*
631  *  ======== bridge_brd_stop ========
632  *  purpose:
633  *      Puts DSP in self loop.
634  *
635  *  Preconditions :
636  *  a) None
637  */
638 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
639 {
640         int status = 0;
641         struct bridge_dev_context *dev_context = dev_ctxt;
642         struct pg_table_attrs *pt_attrs;
643         u32 dsp_pwr_state;
644         struct omap_dsp_platform_data *pdata =
645                 omap_dspbridge_dev->dev.platform_data;
646
647         if (dev_context->dw_brd_state == BRD_STOPPED)
648                 return status;
649
650         /* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
651          * mode before turning off the clocks. This ensures that there are no
652          * pending L3 or other transactions from the IVA2 */
653         dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
654                                         OMAP_POWERSTATEST_MASK;
655         if (dsp_pwr_state != PWRDM_POWER_OFF) {
656                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
657                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
658                 sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
659                 mdelay(10);
660
661                 /* IVA2 is not in OFF state */
662                 /* Set PM_PWSTCTRL_IVA2  to OFF */
663                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
664                         PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
665                 /* Set the SW supervised state transition for Sleep */
666                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
667                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
668         }
669         udelay(10);
670         /* Release the Ext Base virtual Address as the next DSP Program
671          * may have a different load address */
672         if (dev_context->dw_dsp_ext_base_addr)
673                 dev_context->dw_dsp_ext_base_addr = 0;
674
675         dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
676
677         dsp_wdt_enable(false);
678
679         /* This is a good place to clear the MMU page tables as well */
680         if (dev_context->pt_attrs) {
681                 pt_attrs = dev_context->pt_attrs;
682                 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
683                 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
684                 memset((u8 *) pt_attrs->pg_info, 0x00,
685                        (pt_attrs->l2_num_pages * sizeof(struct page_info)));
686         }
687         /* Disable the mailbox interrupts */
688         if (dev_context->mbox) {
689                 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
690                 omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
691                 dev_context->mbox = NULL;
692         }
693         /* Reset IVA2 clocks*/
694         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
695                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
696
697         dsp_clock_disable_all(dev_context->dsp_per_clks);
698         dsp_clk_disable(DSP_CLK_IVA2);
699
700         return status;
701 }
702
703 /*
704  *  ======== bridge_brd_status ========
705  *      Returns the board status.
706  */
707 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
708                                     int *board_state)
709 {
710         struct bridge_dev_context *dev_context = dev_ctxt;
711         *board_state = dev_context->dw_brd_state;
712         return 0;
713 }
714
715 /*
716  *  ======== bridge_brd_write ========
717  *      Copies the buffers to DSP internal or external memory.
718  */
719 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
720                                    u8 *host_buff, u32 dsp_addr,
721                                    u32 ul_num_bytes, u32 mem_type)
722 {
723         int status = 0;
724         struct bridge_dev_context *dev_context = dev_ctxt;
725
726         if (dsp_addr < dev_context->dw_dsp_start_add) {
727                 status = -EPERM;
728                 return status;
729         }
730         if ((dsp_addr - dev_context->dw_dsp_start_add) <
731             dev_context->dw_internal_size) {
732                 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
733                                         ul_num_bytes, mem_type);
734         } else {
735                 status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
736                                             ul_num_bytes, mem_type, false);
737         }
738
739         return status;
740 }
741
742 /*
743  *  ======== bridge_dev_create ========
744  *      Creates a driver object. Puts DSP in self loop.
745  */
746 static int bridge_dev_create(struct bridge_dev_context
747                                         **dev_cntxt,
748                                         struct dev_object *hdev_obj,
749                                         struct cfg_hostres *config_param)
750 {
751         int status = 0;
752         struct bridge_dev_context *dev_context = NULL;
753         s32 entry_ndx;
754         struct cfg_hostres *resources = config_param;
755         struct pg_table_attrs *pt_attrs;
756         u32 pg_tbl_pa;
757         u32 pg_tbl_va;
758         u32 align_size;
759         struct drv_data *drv_datap = dev_get_drvdata(bridge);
760
761         /* Allocate and initialize a data structure to contain the bridge driver
762          *  state, which becomes the context for later calls into this driver */
763         dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
764         if (!dev_context) {
765                 status = -ENOMEM;
766                 goto func_end;
767         }
768
769         dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
770         dev_context->dw_self_loop = (u32) NULL;
771         dev_context->dsp_per_clks = 0;
772         dev_context->dw_internal_size = OMAP_DSP_SIZE;
773         /*  Clear dev context MMU table entries.
774          *  These get set on bridge_io_on_loaded() call after program loaded. */
775         for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
776                 dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
777                     dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
778         }
779         dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
780                                                                  (config_param->
781                                                                   dw_mem_base
782                                                                   [3]),
783                                                                  config_param->
784                                                                  dw_mem_length
785                                                                  [3]);
786         if (!dev_context->dw_dsp_base_addr)
787                 status = -EPERM;
788
789         pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
790         if (pt_attrs != NULL) {
791                 pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
792                 align_size = pt_attrs->l1_size;
793                 /* Align sizes are expected to be power of 2 */
794                 /* we like to get aligned on L1 table size */
795                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
796                                                      align_size, &pg_tbl_pa);
797
798                 /* Check if the PA is aligned for us */
799                 if ((pg_tbl_pa) & (align_size - 1)) {
800                         /* PA not aligned to page table size;
801                          * allocate more and align */
802                         mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
803                                           pt_attrs->l1_size);
804                         /* we like to get aligned on L1 table size */
805                         pg_tbl_va =
806                             (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
807                                                      align_size, &pg_tbl_pa);
808                         /* We should be able to get aligned table now */
809                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
810                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
811                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
812                         /* Align the PA to the next 'align'  boundary */
813                         pt_attrs->l1_base_pa =
814                             ((pg_tbl_pa) +
815                              (align_size - 1)) & (~(align_size - 1));
816                         pt_attrs->l1_base_va =
817                             pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
818                 } else {
819                         /* We got aligned PA, cool */
820                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
821                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
822                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
823                         pt_attrs->l1_base_pa = pg_tbl_pa;
824                         pt_attrs->l1_base_va = pg_tbl_va;
825                 }
826                 if (pt_attrs->l1_base_va)
827                         memset((u8 *) pt_attrs->l1_base_va, 0x00,
828                                pt_attrs->l1_size);
829
830                 /* Number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
831                  * L4 pages */
832                 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
833                 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
834                     pt_attrs->l2_num_pages;
835                 align_size = 4; /* Make it u32 aligned */
836                 /* u32 alignment is sufficient for the L2 tables */
837                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
838                                                      align_size, &pg_tbl_pa);
839                 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
840                 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
841                 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
842                 pt_attrs->l2_base_pa = pg_tbl_pa;
843                 pt_attrs->l2_base_va = pg_tbl_va;
844
845                 if (pt_attrs->l2_base_va)
846                         memset((u8 *) pt_attrs->l2_base_va, 0x00,
847                                pt_attrs->l2_size);
848
849                 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
850                                         sizeof(struct page_info), GFP_KERNEL);
851                 dev_dbg(bridge,
852                         "L1 pa %x, va %x, size %x\n L2 pa %x, va "
853                         "%x, size %x\n", pt_attrs->l1_base_pa,
854                         pt_attrs->l1_base_va, pt_attrs->l1_size,
855                         pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
856                         pt_attrs->l2_size);
857                 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
858                         pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
859         }
860         if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
861             (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
862                 dev_context->pt_attrs = pt_attrs;
863         else
864                 status = -ENOMEM;
865
866         if (!status) {
867                 spin_lock_init(&pt_attrs->pg_lock);
868                 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
869
870                 /* Set the Clock Divisor for the DSP module */
871                 udelay(5);
872                 /* MMU address is obtained from the host
873                  * resources struct */
874                 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
875         }
876         if (!status) {
877                 dev_context->hdev_obj = hdev_obj;
878                 /* Store current board state. */
879                 dev_context->dw_brd_state = BRD_UNKNOWN;
880                 dev_context->resources = resources;
881                 dsp_clk_enable(DSP_CLK_IVA2);
882                 bridge_brd_stop(dev_context);
883                 /* Return ptr to our device state to the DSP API for storage */
884                 *dev_cntxt = dev_context;
885         } else {
886                 if (pt_attrs != NULL) {
887                         kfree(pt_attrs->pg_info);
888
889                         if (pt_attrs->l2_tbl_alloc_va) {
890                                 mem_free_phys_mem((void *)
891                                                   pt_attrs->l2_tbl_alloc_va,
892                                                   pt_attrs->l2_tbl_alloc_pa,
893                                                   pt_attrs->l2_tbl_alloc_sz);
894                         }
895                         if (pt_attrs->l1_tbl_alloc_va) {
896                                 mem_free_phys_mem((void *)
897                                                   pt_attrs->l1_tbl_alloc_va,
898                                                   pt_attrs->l1_tbl_alloc_pa,
899                                                   pt_attrs->l1_tbl_alloc_sz);
900                         }
901                 }
902                 kfree(pt_attrs);
903                 kfree(dev_context);
904         }
905 func_end:
906         return status;
907 }
908
909 /*
910  *  ======== bridge_dev_ctrl ========
911  *      Receives device specific commands.
912  */
913 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
914                                   u32 dw_cmd, void *pargs)
915 {
916         int status = 0;
917         struct bridge_ioctl_extproc *pa_ext_proc =
918                                         (struct bridge_ioctl_extproc *)pargs;
919         s32 ndx;
920
921         switch (dw_cmd) {
922         case BRDIOCTL_CHNLREAD:
923                 break;
924         case BRDIOCTL_CHNLWRITE:
925                 break;
926         case BRDIOCTL_SETMMUCONFIG:
927                 /* store away dsp-mmu setup values for later use */
928                 for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
929                         dev_context->atlb_entry[ndx] = *pa_ext_proc;
930                 break;
931         case BRDIOCTL_DEEPSLEEP:
932         case BRDIOCTL_EMERGENCYSLEEP:
933                 /* Currently only DSP idle is supported; needs updating for
934                  * later releases */
935                 status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
936                 break;
937         case BRDIOCTL_WAKEUP:
938                 status = wake_dsp(dev_context, pargs);
939                 break;
940         case BRDIOCTL_CLK_CTRL:
941                 status = 0;
942                 /* Looking For Baseport Fix for Clocks */
943                 status = dsp_peripheral_clk_ctrl(dev_context, pargs);
944                 break;
945         case BRDIOCTL_PWR_HIBERNATE:
946                 status = handle_hibernation_from_dsp(dev_context);
947                 break;
948         case BRDIOCTL_PRESCALE_NOTIFY:
949                 status = pre_scale_dsp(dev_context, pargs);
950                 break;
951         case BRDIOCTL_POSTSCALE_NOTIFY:
952                 status = post_scale_dsp(dev_context, pargs);
953                 break;
954         case BRDIOCTL_CONSTRAINT_REQUEST:
955                 status = handle_constraints_set(dev_context, pargs);
956                 break;
957         default:
958                 status = -EPERM;
959                 break;
960         }
961         return status;
962 }
963
964 /*
965  *  ======== bridge_dev_destroy ========
966  *      Destroys the driver object.
967  */
968 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
969 {
970         struct pg_table_attrs *pt_attrs;
971         int status = 0;
972         struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
973             dev_ctxt;
974         struct cfg_hostres *host_res;
975         u32 shm_size;
976         struct drv_data *drv_datap = dev_get_drvdata(bridge);
977
978         /* It should never happen */
979         if (!dev_ctxt)
980                 return -EFAULT;
981
982         /* first put the device to stop state */
983         bridge_brd_stop(dev_context);
984         if (dev_context->pt_attrs) {
985                 pt_attrs = dev_context->pt_attrs;
986                 kfree(pt_attrs->pg_info);
987
988                 if (pt_attrs->l2_tbl_alloc_va) {
989                         mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
990                                           pt_attrs->l2_tbl_alloc_pa,
991                                           pt_attrs->l2_tbl_alloc_sz);
992                 }
993                 if (pt_attrs->l1_tbl_alloc_va) {
994                         mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
995                                           pt_attrs->l1_tbl_alloc_pa,
996                                           pt_attrs->l1_tbl_alloc_sz);
997                 }
998                 kfree(pt_attrs);
999
1000         }
1001
1002         if (dev_context->resources) {
1003                 host_res = dev_context->resources;
1004                 shm_size = drv_datap->shm_size;
1005                 if (shm_size >= 0x10000) {
1006                         if ((host_res->dw_mem_base[1]) &&
1007                             (host_res->dw_mem_phys[1])) {
1008                                 mem_free_phys_mem((void *)
1009                                                   host_res->dw_mem_base
1010                                                   [1],
1011                                                   host_res->dw_mem_phys
1012                                                   [1], shm_size);
1013                         }
1014                 } else {
1015                         dev_dbg(bridge, "%s: Error getting shm size "
1016                                 "from registry: %x. Not calling "
1017                                 "mem_free_phys_mem\n", __func__,
1018                                 status);
1019                 }
1020                 host_res->dw_mem_base[1] = 0;
1021                 host_res->dw_mem_phys[1] = 0;
1022
1023                 if (host_res->dw_mem_base[0])
1024                         iounmap((void *)host_res->dw_mem_base[0]);
1025                 if (host_res->dw_mem_base[2])
1026                         iounmap((void *)host_res->dw_mem_base[2]);
1027                 if (host_res->dw_mem_base[3])
1028                         iounmap((void *)host_res->dw_mem_base[3]);
1029                 if (host_res->dw_mem_base[4])
1030                         iounmap((void *)host_res->dw_mem_base[4]);
1031                 if (host_res->dw_dmmu_base)
1032                         iounmap(host_res->dw_dmmu_base);
1033                 if (host_res->dw_per_base)
1034                         iounmap(host_res->dw_per_base);
1035                 if (host_res->dw_per_pm_base)
1036                         iounmap((void *)host_res->dw_per_pm_base);
1037                 if (host_res->dw_core_pm_base)
1038                         iounmap((void *)host_res->dw_core_pm_base);
1039                 if (host_res->dw_sys_ctrl_base)
1040                         iounmap(host_res->dw_sys_ctrl_base);
1041
1042                 host_res->dw_mem_base[0] = (u32) NULL;
1043                 host_res->dw_mem_base[2] = (u32) NULL;
1044                 host_res->dw_mem_base[3] = (u32) NULL;
1045                 host_res->dw_mem_base[4] = (u32) NULL;
1046                 host_res->dw_dmmu_base = NULL;
1047                 host_res->dw_sys_ctrl_base = NULL;
1048
1049                 kfree(host_res);
1050         }
1051
1052         /* Free the driver's device context: */
1053         kfree(drv_datap->base_img);
1054         kfree(drv_datap);
1055         dev_set_drvdata(bridge, NULL);
1056         kfree((void *)dev_ctxt);
1057         return status;
1058 }
1059
1060 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1061                                    u32 dsp_dest_addr, u32 dsp_src_addr,
1062                                    u32 ul_num_bytes, u32 mem_type)
1063 {
1064         int status = 0;
1065         u32 src_addr = dsp_src_addr;
1066         u32 dest_addr = dsp_dest_addr;
1067         u32 copy_bytes = 0;
1068         u32 total_bytes = ul_num_bytes;
1069         u8 host_buf[BUFFERSIZE];
1070         struct bridge_dev_context *dev_context = dev_ctxt;
1071         while (total_bytes > 0 && !status) {
1072                 copy_bytes =
1073                     total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
1074                 /* Read from External memory */
1075                 status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1076                                            copy_bytes, mem_type);
1077                 if (!status) {
1078                         if (dest_addr < (dev_context->dw_dsp_start_add +
1079                                          dev_context->dw_internal_size)) {
1080                                 /* Write to Internal memory */
1081                                 status = write_dsp_data(dev_ctxt, host_buf,
1082                                                         dest_addr, copy_bytes,
1083                                                         mem_type);
1084                         } else {
1085                                 /* Write to External memory */
1086                                 status =
1087                                     write_ext_dsp_data(dev_ctxt, host_buf,
1088                                                        dest_addr, copy_bytes,
1089                                                        mem_type, false);
1090                         }
1091                 }
1092                 total_bytes -= copy_bytes;
1093                 src_addr += copy_bytes;
1094                 dest_addr += copy_bytes;
1095         }
1096         return status;
1097 }
1098
1099 /* Unlike bridge_brd_write, mem_write does not halt the DSP before writing */
1100 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1101                                     u8 *host_buff, u32 dsp_addr,
1102                                     u32 ul_num_bytes, u32 mem_type)
1103 {
1104         int status = 0;
1105         struct bridge_dev_context *dev_context = dev_ctxt;
1106         u32 ul_remain_bytes = 0;
1107         u32 ul_bytes = 0;
1108         ul_remain_bytes = ul_num_bytes;
1109         while (ul_remain_bytes > 0 && !status) {
1110                 ul_bytes =
1111                     ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1112                 if (dsp_addr < (dev_context->dw_dsp_start_add +
1113                                  dev_context->dw_internal_size)) {
1114                         status =
1115                             write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1116                                            ul_bytes, mem_type);
1117                 } else {
1118                         status = write_ext_dsp_data(dev_ctxt, host_buff,
1119                                                     dsp_addr, ul_bytes,
1120                                                     mem_type, true);
1121                 }
1122                 ul_remain_bytes -= ul_bytes;
1123                 dsp_addr += ul_bytes;
1124                 host_buff = host_buff + ul_bytes;
1125         }
1126         return status;
1127 }
1128
1129 /*
1130  *  ======== bridge_brd_mem_map ========
1131  *      This function maps an MPU buffer to the DSP address space. It performs
1132  *  linear to physical address translation if required. It translates each
1133  *  page, since linear addresses can be physically non-contiguous.
1134  *  All address & size arguments are assumed to be page aligned (in proc.c).
1135  *
1136  *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1137  */
1138 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1139                                   u32 ul_mpu_addr, u32 virt_addr,
1140                                   u32 ul_num_bytes, u32 ul_map_attr,
1141                                   struct page **mapped_pages)
1142 {
1143         u32 attrs;
1144         int status = 0;
1145         struct bridge_dev_context *dev_context = dev_ctxt;
1146         struct hw_mmu_map_attrs_t hw_attrs;
1147         struct vm_area_struct *vma;
1148         struct mm_struct *mm = current->mm;
1149         u32 write = 0;
1150         u32 num_usr_pgs = 0;
1151         struct page *mapped_page, *pg;
1152         s32 pg_num;
1153         u32 va = virt_addr;
1154         struct task_struct *curr_task = current;
1155         u32 pg_i = 0;
1156         u32 mpu_addr, pa;
1157
1158         dev_dbg(bridge,
1159                 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1160                 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1161                 ul_map_attr);
1162         if (ul_num_bytes == 0)
1163                 return -EINVAL;
1164
1165         if (ul_map_attr & DSP_MAP_DIR_MASK) {
1166                 attrs = ul_map_attr;
1167         } else {
1168                 /* Assign default attributes */
1169                 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1170         }
1171         /* Take mapping properties */
1172         if (attrs & DSP_MAPBIGENDIAN)
1173                 hw_attrs.endianism = HW_BIG_ENDIAN;
1174         else
1175                 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1176
1177         hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1178             ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1179         /* Ignore element_size if mixed_size is enabled */
1180         if (hw_attrs.mixed_size == 0) {
1181                 if (attrs & DSP_MAPELEMSIZE8) {
1182                         /* Size is 8 bit */
1183                         hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1184                 } else if (attrs & DSP_MAPELEMSIZE16) {
1185                         /* Size is 16 bit */
1186                         hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1187                 } else if (attrs & DSP_MAPELEMSIZE32) {
1188                         /* Size is 32 bit */
1189                         hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1190                 } else if (attrs & DSP_MAPELEMSIZE64) {
1191                         /* Size is 64 bit */
1192                         hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1193                 } else {
1194                         /*
1195                          * Mixedsize isn't enabled, so size can't be
1196                          * zero here
1197                          */
1198                         return -EINVAL;
1199                 }
1200         }
1201         if (attrs & DSP_MAPDONOTLOCK)
1202                 hw_attrs.donotlockmpupage = 1;
1203         else
1204                 hw_attrs.donotlockmpupage = 0;
1205
1206         if (attrs & DSP_MAPVMALLOCADDR) {
1207                 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1208                                        ul_num_bytes, &hw_attrs);
1209         }
1210         /*
1211          * Do OS-specific user-va to pa translation.
1212          * Combine physically contiguous regions to reduce TLBs.
1213          * Pass the translated pa to pte_update.
1214          */
1215         if ((attrs & DSP_MAPPHYSICALADDR)) {
1216                 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1217                                     ul_num_bytes, &hw_attrs);
1218                 goto func_cont;
1219         }
1220
1221         /*
1222          * Important Note: ul_mpu_addr is mapped from user application process
1223          * to current process - it must lie completely within the current
1224          * virtual memory address space in order to be of use to us here!
1225          */
1226         down_read(&mm->mmap_sem);
1227         vma = find_vma(mm, ul_mpu_addr);
1228         if (vma)
1229                 dev_dbg(bridge,
1230                         "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1231                         "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1232                         ul_num_bytes, vma->vm_start, vma->vm_end,
1233                         vma->vm_flags);
1234
1235         /*
1236          * It is observed that under some circumstances, the user buffer is
1237          * spread across several VMAs. So loop through and check if the entire
1238          * user buffer is covered
1239          */
1240         while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1241                 /* jump to the next VMA region */
1242                 vma = find_vma(mm, vma->vm_end + 1);
1243                 dev_dbg(bridge,
1244                         "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1245                         "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1246                         ul_num_bytes, vma->vm_start, vma->vm_end,
1247                         vma->vm_flags);
1248         }
1249         if (!vma) {
1250                 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1251                        __func__, ul_mpu_addr, ul_num_bytes);
1252                 status = -EINVAL;
1253                 up_read(&mm->mmap_sem);
1254                 goto func_cont;
1255         }
1256
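             /*
              * VM_IO areas may not be backed by struct pages that can be
              * pinned with get_user_pages(); walk the page tables to get the
              * physical address of each page instead
              */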
1257         if (vma->vm_flags & VM_IO) {
1258                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1259                 mpu_addr = ul_mpu_addr;
1260
1261                 /* Get the physical addresses for user buffer */
1262                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1263                         pa = user_va2_pa(mm, mpu_addr);
1264                         if (!pa) {
1265                                 status = -EPERM;
1266                                 pr_err("DSPBRIDGE: VM_IO mapping physical "
1267                                        "address is invalid\n");
1268                                 break;
1269                         }
1270                         if (pfn_valid(__phys_to_pfn(pa))) {
1271                                 pg = PHYS_TO_PAGE(pa);
1272                                 get_page(pg);
1273                                 if (page_count(pg) < 1) {
1274                                         pr_err("Bad page in VM_IO buffer\n");
1275                                         bad_page_dump(pa, pg);
1276                                 }
1277                         }
1278                         status = pte_set(dev_context->pt_attrs, pa,
1279                                          va, HW_PAGE_SIZE4KB, &hw_attrs);
1280                         if (status)
1281                                 break;
1282
1283                         va += HW_PAGE_SIZE4KB;
1284                         mpu_addr += HW_PAGE_SIZE4KB;
1285                         pa += HW_PAGE_SIZE4KB;
1286                 }
1287         } else {
1288                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1289                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1290                         write = 1;
1291
1292                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1293                         pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1294                                                 write, 1, &mapped_page, NULL);
1295                         if (pg_num > 0) {
1296                                 if (page_count(mapped_page) < 1) {
1297                                         pr_err("Bad page count after doing "
1298                                                "get_user_pages on "
1299                                                "user buffer\n");
1300                                         bad_page_dump(page_to_phys(mapped_page),
1301                                                       mapped_page);
1302                                 }
1303                                 status = pte_set(dev_context->pt_attrs,
1304                                                  page_to_phys(mapped_page), va,
1305                                                  HW_PAGE_SIZE4KB, &hw_attrs);
1306                                 if (status)
1307                                         break;
1308
1309                                 if (mapped_pages)
1310                                         mapped_pages[pg_i] = mapped_page;
1311
1312                                 va += HW_PAGE_SIZE4KB;
1313                                 ul_mpu_addr += HW_PAGE_SIZE4KB;
1314                         } else {
1315                                 pr_err("DSPBRIDGE: get_user_pages FAILED, "
1316                                        "MPU addr = 0x%x, "
1317                                        "vma->vm_flags = 0x%lx, "
1318                                        "get_user_pages err "
1319                                        "value = %d, buffer "
1320                                        "size = 0x%x\n", ul_mpu_addr,
1321                                        vma->vm_flags, pg_num, ul_num_bytes);
1322                                 status = -EPERM;
1323                                 break;
1324                         }
1325                 }
1326         }
1327         up_read(&mm->mmap_sem);
1328 func_cont:
1329         if (status) {
1330                 /*
1331                  * Roll back the pages already mapped in case the mapping
1332                  * failed part-way through
1333                  */
1334                 if (pg_i) {
1335                         bridge_brd_mem_un_map(dev_context, virt_addr,
1336                                            (pg_i * PG_SIZE4K));
1337                 }
1338                 status = -EPERM;
1339         }
1340         /*
1341          * In any case, flush the TLB.
1342          * This is done here rather than in pte_update to avoid unnecessary
1343          * repetition while mapping non-contiguous physical regions of a virtual
1344          * region
1345          */
1346         flush_all(dev_context);
1347         dev_dbg(bridge, "%s status %x\n", __func__, status);
1348         return status;
1349 }
1350
1351 /*
1352  *  ======== bridge_brd_mem_un_map ========
1353  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1354  *
1355  *      The PTEs of a mapped memory block are contiguous in any page table,
1356  *      so instead of looking up the PTE address for every 4K block,
1357  *      we clear consecutive PTEs until all the bytes have been unmapped.
1358  */
1359 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1360                                      u32 virt_addr, u32 ul_num_bytes)
1361 {
1362         u32 l1_base_va;
1363         u32 l2_base_va;
1364         u32 l2_base_pa;
1365         u32 l2_page_num;
1366         u32 pte_val;
1367         u32 pte_size;
1368         u32 pte_count;
1369         u32 pte_addr_l1;
1370         u32 pte_addr_l2 = 0;
1371         u32 rem_bytes;
1372         u32 rem_bytes_l2;
1373         u32 va_curr;
1374         struct page *pg = NULL;
1375         int status = 0;
1376         struct bridge_dev_context *dev_context = dev_ctxt;
1377         struct pg_table_attrs *pt = dev_context->pt_attrs;
1378         u32 temp;
1379         u32 paddr;
1380         u32 numof4k_pages = 0;
1381
1382         va_curr = virt_addr;
1383         rem_bytes = ul_num_bytes;
1384         rem_bytes_l2 = 0;
1385         l1_base_va = pt->l1_base_va;
1386         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1387         dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1388                 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1389                 ul_num_bytes, l1_base_va, pte_addr_l1);
1390
1391         while (rem_bytes && !status) {
1392                 u32 va_curr_orig = va_curr;
1393                 /* Find whether the L1 PTE points to a valid L2 PT */
1394                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1395                 pte_val = *(u32 *) pte_addr_l1;
1396                 pte_size = hw_mmu_pte_size_l1(pte_val);
1397
1398                 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1399                         goto skip_coarse_page;
1400
1401                 /*
1402                  * Get the L2 PA from the L1 PTE, and find
1403                  * corresponding L2 VA
1404                  */
1405                 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1406                 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1407                 l2_page_num =
1408                     (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1409                 /*
1410                  * Find the L2 PTE address from which we will start
1411                  * clearing, the number of PTEs to be cleared on this
1412                  * page, and the size of VA space that needs to be
1413                  * cleared on this L2 page
1414                  */
1415                 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1416                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1417                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1418                 if (rem_bytes < (pte_count * PG_SIZE4K))
1419                         pte_count = rem_bytes / PG_SIZE4K;
1420                 rem_bytes_l2 = pte_count * PG_SIZE4K;
1421
1422                 /*
1423                  * Unmap the VA space on this L2 PT. A quicker way
1424                  * would be to clear pte_count entries starting from
1425                  * pte_addr_l2. However, the code below checks that we don't
1426                  * clear invalid entries, and that we don't clear less than
1427                  * 64KB for a 64KB entry. Similar checking is done for the
1428                  * L1 PTEs further below.
1429                  */
1430                 while (rem_bytes_l2 && !status) {
1431                         pte_val = *(u32 *) pte_addr_l2;
1432                         pte_size = hw_mmu_pte_size_l2(pte_val);
1433                         /* va_curr aligned to pte_size? */
1434                         if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1435                             va_curr & (pte_size - 1)) {
1436                                 status = -EPERM;
1437                                 break;
1438                         }
1439
1440                         /* Collect Physical addresses from VA */
1441                         paddr = (pte_val & ~(pte_size - 1));
1442                         if (pte_size == HW_PAGE_SIZE64KB)
1443                                 numof4k_pages = 16;
1444                         else
1445                                 numof4k_pages = 1;
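                             /*
                              * A 64KB PTE maps sixteen 4KB MPU pages; drop the
                              * reference taken on each backing page
                              */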
1446                         temp = 0;
1447                         while (temp++ < numof4k_pages) {
1448                                 if (!pfn_valid(__phys_to_pfn(paddr))) {
1449                                         paddr += HW_PAGE_SIZE4KB;
1450                                         continue;
1451                                 }
1452                                 pg = PHYS_TO_PAGE(paddr);
1453                                 if (page_count(pg) < 1) {
1454                                         pr_info("DSPBRIDGE: UNMAP function: "
1455                                                 "COUNT 0 FOR PA 0x%x, size = "
1456                                                 "0x%x\n", paddr, ul_num_bytes);
1457                                         bad_page_dump(paddr, pg);
1458                                 } else {
1459                                         set_page_dirty(pg);
1460                                         page_cache_release(pg);
1461                                 }
1462                                 paddr += HW_PAGE_SIZE4KB;
1463                         }
1464                         if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1465                                 status = -EPERM;
1466                                 goto EXIT_LOOP;
1467                         }
1468
1469                         status = 0;
1470                         rem_bytes_l2 -= pte_size;
1471                         va_curr += pte_size;
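                             /*
                              * Advance by one 32-bit PTE slot per 4KB page
                              * covered by the entry just cleared
                              */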
1472                         pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1473                 }
1474                 spin_lock(&pt->pg_lock);
1475                 if (rem_bytes_l2 == 0) {
1476                         pt->pg_info[l2_page_num].num_entries -= pte_count;
1477                         if (pt->pg_info[l2_page_num].num_entries == 0) {
1478                                 /*
1479                                  * Clear the L1 PTE pointing to the L2 PT
1480                                  */
1481                                 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1482                                                      HW_MMU_COARSE_PAGE_SIZE))
1483                                         status = 0;
1484                                 else {
1485                                         status = -EPERM;
1486                                         spin_unlock(&pt->pg_lock);
1487                                         goto EXIT_LOOP;
1488                                 }
1489                         }
1490                         rem_bytes -= pte_count * PG_SIZE4K;
1491                 } else
1492                         status = -EPERM;
1493
1494                 spin_unlock(&pt->pg_lock);
1495                 continue;
1496 skip_coarse_page:
1497                 /* va_curr aligned to pte_size? */
1498                 /* pte_size = 1 MB or 16 MB */
1499                 if (pte_size == 0 || rem_bytes < pte_size ||
1500                     va_curr & (pte_size - 1)) {
1501                         status = -EPERM;
1502                         break;
1503                 }
1504
1505                 if (pte_size == HW_PAGE_SIZE1MB)
1506                         numof4k_pages = 256;
1507                 else
1508                         numof4k_pages = 4096;
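                     /*
                      * A 1MB section spans 256 4KB pages, a 16MB supersection
                      * spans 4096
                      */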
1509                 temp = 0;
1510                 /* Collect Physical addresses from VA */
1511                 paddr = (pte_val & ~(pte_size - 1));
1512                 while (temp++ < numof4k_pages) {
1513                         if (pfn_valid(__phys_to_pfn(paddr))) {
1514                                 pg = PHYS_TO_PAGE(paddr);
1515                                 if (page_count(pg) < 1) {
1516                                         pr_info("DSPBRIDGE: UNMAP function: "
1517                                                 "COUNT 0 FOR PA 0x%x, size = "
1518                                                 "0x%x\n", paddr, ul_num_bytes);
1519                                         bad_page_dump(paddr, pg);
1520                                 } else {
1521                                         set_page_dirty(pg);
1522                                         page_cache_release(pg);
1523                                 }
1524                         }
1525                         paddr += HW_PAGE_SIZE4KB;
1526                 }
1527                 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1528                         status = 0;
1529                         rem_bytes -= pte_size;
1530                         va_curr += pte_size;
1531                 } else {
1532                         status = -EPERM;
1533                         goto EXIT_LOOP;
1534                 }
1535         }
1536         /*
1537          * It is better to flush the TLB here, so that any stale entries
1538          * are flushed out
1539          */
1540 EXIT_LOOP:
1541         flush_all(dev_context);
1542         dev_dbg(bridge,
1543                 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1544                 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1545                 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1546         return status;
1547 }
1548
1549 /*
1550  *  ======== user_va2_pa ========
1551  *  Purpose:
1552  *      This function walks through the page tables to convert a userland
1553  *      virtual address to a physical address
1554  */
1555 static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1556 {
1557         pgd_t *pgd;
1558         pmd_t *pmd;
1559         pte_t *ptep, pte;
1560
1561         pgd = pgd_offset(mm, address);
1562         if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1563                 pmd = pmd_offset(pgd, address);
1564                 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1565                         ptep = pte_offset_map(pmd, address);
1566                         if (ptep) {
1567                                 pte = *ptep;
1568                                 if (pte_present(pte))
1569                                         return pte & PAGE_MASK;
1570                         }
1571                 }
1572         }
1573
1574         return 0;
1575 }
1576
1577 /*
1578  *  ======== pte_update ========
1579  *      This function calculates the optimum page-aligned addresses and sizes.
1580  *      The caller must pass page-aligned values.
1581  */
1582 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1583                              u32 va, u32 size,
1584                              struct hw_mmu_map_attrs_t *map_attrs)
1585 {
1586         u32 i;
1587         u32 all_bits;
1588         u32 pa_curr = pa;
1589         u32 va_curr = va;
1590         u32 num_bytes = size;
1591         struct bridge_dev_context *dev_context = dev_ctxt;
1592         int status = 0;
1593         u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1594                 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1595         };
1596
1597         while (num_bytes && !status) {
1598                 /* Find the maximum page size with which both PA & VA are
1599                  * aligned */
1600                 all_bits = pa_curr | va_curr;
1601
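                     /*
                      * A candidate size fits only if enough bytes remain and
                      * neither address has bits set below that page boundary
                      */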
1602                 for (i = 0; i < 4; i++) {
1603                         if ((num_bytes >= page_size[i]) && ((all_bits &
1604                                                              (page_size[i] -
1605                                                               1)) == 0)) {
1606                                 status =
1607                                     pte_set(dev_context->pt_attrs, pa_curr,
1608                                             va_curr, page_size[i], map_attrs);
1609                                 pa_curr += page_size[i];
1610                                 va_curr += page_size[i];
1611                                 num_bytes -= page_size[i];
1612                                 /* Don't try smaller sizes. Hopefully we have
1613                                  * reached an address aligned to a bigger page
1614                                  * size */
1615                                 break;
1616                         }
1617                 }
1618         }
1619
1620         return status;
1621 }
1622
1623 /*
1624  *  ======== pte_set ========
1625  *      This function calculates the PTE address (MPU virtual) to be updated.
1626  *      It also manages the L2 page tables.
1627  */
1628 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1629                           u32 size, struct hw_mmu_map_attrs_t *attrs)
1630 {
1631         u32 i;
1632         u32 pte_val;
1633         u32 pte_addr_l1;
1634         u32 pte_size;
1635         /* Base address of the PT that will be updated */
1636         u32 pg_tbl_va;
1637         u32 l1_base_va;
1638         /* The compiler warns that the next three variables might be used
1639          * uninitialized in this function. That does not appear to be the
1640          * case, but initialize them anyway to silence the warning. */
1641         u32 l2_base_va = 0;
1642         u32 l2_base_pa = 0;
1643         u32 l2_page_num = 0;
1644         int status = 0;
1645
1646         l1_base_va = pt->l1_base_va;
1647         pg_tbl_va = l1_base_va;
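             /*
              * 1MB and 16MB entries are set directly in the L1 table; 4KB and
              * 64KB entries live in an L2 (coarse) table that may first have
              * to be allocated
              */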
1648         if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1649                 /* Find whether the L1 PTE points to a valid L2 PT */
1650                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1651                 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1652                         pte_val = *(u32 *) pte_addr_l1;
1653                         pte_size = hw_mmu_pte_size_l1(pte_val);
1654                 } else {
1655                         return -EPERM;
1656                 }
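                     /*
                      * pg_lock serializes L2 table allocation and the per-page
                      * entry counts in pg_info
                      */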
1657                 spin_lock(&pt->pg_lock);
1658                 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1659                         /* Get the L2 PA from the L1 PTE, and find
1660                          * corresponding L2 VA */
1661                         l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1662                         l2_base_va =
1663                             l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1664                         l2_page_num =
1665                             (l2_base_pa -
1666                              pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1667                 } else if (pte_size == 0) {
1668                         /* L1 PTE is invalid. Allocate a L2 PT and
1669                          * point the L1 PTE to it */
1670                         /* Find a free L2 PT. */
1671                         for (i = 0; (i < pt->l2_num_pages) &&
1672                              (pt->pg_info[i].num_entries != 0); i++)
1673                                 ;
1674                         if (i < pt->l2_num_pages) {
1675                                 l2_page_num = i;
1676                                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1677                                                 HW_MMU_COARSE_PAGE_SIZE);
1678                                 l2_base_va = pt->l2_base_va + (l2_page_num *
1679                                                 HW_MMU_COARSE_PAGE_SIZE);
1680                                 /* Endianness attributes are ignored for
1681                                  * HW_MMU_COARSE_PAGE_SIZE */
1682                                 status =
1683                                     hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1684                                                    HW_MMU_COARSE_PAGE_SIZE,
1685                                                    attrs);
1686                         } else {
1687                                 status = -ENOMEM;
1688                         }
1689                 } else {
1690                         /* Found valid L1 PTE of another size.
1691                          * Should not overwrite it. */
1692                         status = -EPERM;
1693                 }
1694                 if (!status) {
1695                         pg_tbl_va = l2_base_va;
1696                         if (size == HW_PAGE_SIZE64KB)
1697                                 pt->pg_info[l2_page_num].num_entries += 16;
1698                         else
1699                                 pt->pg_info[l2_page_num].num_entries++;
1700                         dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1701                                 "%x, num_entries %x\n", l2_base_va,
1702                                 l2_base_pa, l2_page_num,
1703                                 pt->pg_info[l2_page_num].num_entries);
1704                 }
1705                 spin_unlock(&pt->pg_lock);
1706         }
1707         if (!status) {
1708                 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1709                         pg_tbl_va, pa, va, size);
1710                 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1711                         "mixed_size %x\n", attrs->endianism,
1712                         attrs->element_size, attrs->mixed_size);
1713                 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1714         }
1715
1716         return status;
1717 }
1718
1719 /* Memory map kernel VA -- memory allocated with vmalloc */
1720 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1721                                   u32 ul_mpu_addr, u32 virt_addr,
1722                                   u32 ul_num_bytes,
1723                                   struct hw_mmu_map_attrs_t *hw_attrs)
1724 {
1725         int status = 0;
1726         struct page *page[1];
1727         u32 i;
1728         u32 pa_curr;
1729         u32 pa_next;
1730         u32 va_curr;
1731         u32 size_curr;
1732         u32 num_pages;
1733         u32 pa;
1734         u32 num_of4k_pages;
1735         u32 temp = 0;
1736
1737         /*
1738          * Do the kernel VA to PA translation.
1739          * Combine physically contiguous regions to reduce TLB entries.
1740          * Pass the translated PA to pte_update.
1741          */
1742         num_pages = ul_num_bytes / PAGE_SIZE;   /* PAGE_SIZE = OS page size */
1743         i = 0;
1744         va_curr = ul_mpu_addr;
1745         page[0] = vmalloc_to_page((void *)va_curr);
1746         pa_next = page_to_phys(page[0]);
1747         while (!status && (i < num_pages)) {
1748                 /*
1749                  * Reuse pa_next from the previous iteration to avoid
1750                  * an extra va2pa call
1751                  */
1752                 pa_curr = pa_next;
1753                 size_curr = PAGE_SIZE;
1754                 /*
1755                  * If the next page is physically contiguous,
1756                  * map it with the current one by increasing
1757                  * the size of the region to be mapped
1758                  */
1759                 while (++i < num_pages) {
1760                         page[0] =
1761                             vmalloc_to_page((void *)(va_curr + size_curr));
1762                         pa_next = page_to_phys(page[0]);
1763
1764                         if (pa_next == (pa_curr + size_curr))
1765                                 size_curr += PAGE_SIZE;
1766                         else
1767                                 break;
1768
1769                 }
1770                 if (pa_next == 0) {
1771                         status = -ENOMEM;
1772                         break;
1773                 }
1774                 pa = pa_curr;
1775                 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
                     /* Restart the 4KB page count for this contiguous chunk */
                     temp = 0;
1776                 while (temp++ < num_of4k_pages) {
1777                         get_page(PHYS_TO_PAGE(pa));
1778                         pa += HW_PAGE_SIZE4KB;
1779                 }
1780                 status = pte_update(dev_context, pa_curr, virt_addr +
1781                                     (va_curr - ul_mpu_addr), size_curr,
1782                                     hw_attrs);
1783                 va_curr += size_curr;
1784         }
1785         /*
1786          * In any case, flush the TLB.
1787          * This is done here rather than in pte_update to avoid unnecessary
1788          * repetition while mapping non-contiguous physical regions of a virtual
1789          * region
1790          */
1791         flush_all(dev_context);
1792         dev_dbg(bridge, "%s status %x\n", __func__, status);
1793         return status;
1794 }
1795
1796 /*
1797  *  ======== wait_for_start ========
1798  *      Wait for the signal from the DSP that it has started, or time out.
1799  */
1800 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
1801 {
1802         u16 timeout = TIHELEN_ACKTIMEOUT;
1803
1804         /*  Wait for response from board */
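             /* Poll every 10 us; exit when the sync word reads back zero */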
1805         while (__raw_readw(dw_sync_addr) && --timeout)
1806                 udelay(10);
1807
1808         /*  If timed out: return false */
1809         if (!timeout) {
1810                 pr_err("%s: Timed out waiting for DSP to start\n", __func__);
1811                 return false;
1812         }
1813         return true;
1814 }