/*
 * tiomap.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor Manager Driver for TI OMAP3430 EVM.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <plat/dsp.h>

#include <linux/types.h>
/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>

/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/*  ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>

/*  ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
#include "_deh.h"

/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4         /* GPP byte offset */

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT  10000

#define MMU_SECTION_ADDR_MASK    0xFFF00000
#define MMU_SSECTION_ADDR_MASK   0xFF000000
#define MMU_LARGE_PAGE_MASK      0xFFFF0000
#define MMU_SMALL_PAGE_MASK      0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
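/* Resolve a physical address to its struct page via the page frame number */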
#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)

/*
 * This is a totally ugly layer violation, but needed until
 * omap_ctrl_set_dsp_boot*() are provided.
 */
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)

#define OMAP343X_CTRL_REGADDR(reg) \
        OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))

/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
                                  u8 *host_buff,
                                  u32 dsp_addr, u32 ul_num_bytes,
                                  u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                                   u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
                                    int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
                                   u8 *host_buff,
                                   u32 dsp_addr, u32 ul_num_bytes,
                                   u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
                                    u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
                                   u32 dsp_dest_addr, u32 dsp_src_addr,
                                   u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
                                    u8 *host_buff, u32 dsp_addr,
                                    u32 ul_num_bytes, u32 mem_type);
static int bridge_dev_create(struct bridge_dev_context
                                        **dev_cntxt,
                                        struct dev_object *hdev_obj,
                                        struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
                                  u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);

/*
 *  This Bridge driver's function interface table.
 */
static struct bridge_drv_interface drv_interface_fxns = {
        /* Bridge API ver. for which this bridge driver is built. */
        BRD_API_MAJOR_VERSION,
        BRD_API_MINOR_VERSION,
        bridge_dev_create,
        bridge_dev_destroy,
        bridge_dev_ctrl,
        bridge_brd_monitor,
        bridge_brd_start,
        bridge_brd_stop,
        bridge_brd_status,
        bridge_brd_read,
        bridge_brd_write,
        bridge_brd_set_state,
        bridge_brd_mem_copy,
        bridge_brd_mem_write,
        /* The following CHNL functions are provided by chnl_io.lib: */
        bridge_chnl_create,
        bridge_chnl_destroy,
        bridge_chnl_open,
        bridge_chnl_close,
        bridge_chnl_add_io_req,
        bridge_chnl_get_ioc,
        bridge_chnl_cancel_io,
        bridge_chnl_flush_io,
        bridge_chnl_get_info,
        bridge_chnl_get_mgr_info,
        bridge_chnl_idle,
        bridge_chnl_register_notify,
        /* The following IO functions are provided by chnl_io.lib: */
        bridge_io_create,
        bridge_io_destroy,
        bridge_io_on_loaded,
        bridge_io_get_proc_load,
        /* The following msg_ctrl functions are provided by chnl_io.lib: */
        bridge_msg_create,
        bridge_msg_create_queue,
        bridge_msg_delete,
        bridge_msg_delete_queue,
        bridge_msg_get,
        bridge_msg_put,
        bridge_msg_register_notify,
        bridge_msg_set_queue_id,
};

/*
 *  ======== bridge_drv_entry ========
 *  purpose:
 *      Bridge Driver entry point.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
                   const char *driver_file_name)
{
        DBC_REQUIRE(driver_file_name != NULL);

        io_sm_init();           /* Initialization of io_sm module */

        if (strcmp(driver_file_name, "UMA") == 0)
                *drv_intf = &drv_interface_fxns;
        else
                dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
}
190 /*
191  *  ======== bridge_brd_monitor ========
192  *  purpose:
193  *      This bridge_brd_monitor puts DSP into a Loadable state.
194  *      i.e Application can load and start the device.
195  *
196  *  Preconditions:
197  *      Device in 'OFF' state.
198  */
199 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
200 {
201         struct bridge_dev_context *dev_context = dev_ctxt;
202         u32 temp;
203         struct omap_dsp_platform_data *pdata =
204                 omap_dspbridge_dev->dev.platform_data;
205
206         temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
207                                         OMAP_POWERSTATEST_MASK;
208         if (!(temp & 0x02)) {
209                 /* IVA2 is not in ON state */
210                 /* Read and set PM_PWSTCTRL_IVA2  to ON */
211                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
212                         PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
213                 /* Set the SW supervised state transition */
214                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
215                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
216
217                 /* Wait until the state has moved to ON */
218                 while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
219                                                 OMAP_INTRANSITION_MASK)
220                         ;
221                 /* Disable Automatic transition */
222                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
223                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
224         }
225
226         dsp_clk_enable(DSP_CLK_IVA2);
227
228         /* set the device state to IDLE */
229         dev_context->dw_brd_state = BRD_IDLE;
230
231         return 0;
232 }
233
/*
 *  ======== bridge_brd_read ========
 *  purpose:
 *      Reads buffers from DSP memory.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
                                  u8 *host_buff, u32 dsp_addr,
                                  u32 ul_num_bytes, u32 mem_type)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;
        u32 offset;
        u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;

        if (dsp_addr < dev_context->dw_dsp_start_add) {
                status = -EPERM;
                return status;
        }
        /* change here to account for the 3 bands of the DSP internal memory */
        if ((dsp_addr - dev_context->dw_dsp_start_add) <
            dev_context->dw_internal_size) {
                offset = dsp_addr - dev_context->dw_dsp_start_add;
        } else {
                status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
                                           ul_num_bytes, mem_type);
                return status;
        }
        /* copy the data from DSP memory */
        memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
        return status;
}

/*
 *  ======== bridge_brd_set_state ========
 *  purpose:
 *      This routine updates the Board status.
 */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
                                    u32 brd_state)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;

        dev_context->dw_brd_state = brd_state;
        return status;
}

/*
 *  ======== bridge_brd_start ========
 *  purpose:
 *      Initializes DSP MMU and starts the DSP.
 *
 *  Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  c) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                                   u32 dsp_addr)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;
        struct iommu *mmu = NULL;
        struct shm_segs *sm_sg;
        int l4_i = 0, tlb_i = 0;
        u32 sg0_da = 0, sg1_da = 0;
        struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
        u32 dw_sync_addr = 0;
        u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
        u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
        u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
        /* Offset of shm_base_virt from tlb_base_virt */
        u32 ul_shm_offset_virt;
        struct cfg_hostres *resources = NULL;
        u32 temp;
        u32 ul_dsp_clk_rate;
        u32 ul_dsp_clk_addr;
        u32 ul_bios_gp_timer;
        u32 clk_cmd;
        struct io_mgr *hio_mgr;
        u32 ul_load_monitor_timer;
        struct omap_dsp_platform_data *pdata =
                omap_dspbridge_dev->dev.platform_data;

        /* The device context contains all the mmu setup info from when the
         * last dsp base image was loaded. The first entry is always
         * SHMMEM base. */
        /* Get SHM_BEG - convert to byte address */
        (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
                             &ul_shm_base_virt);
        ul_shm_base_virt *= DSPWORDSIZE;
        DBC_ASSERT(ul_shm_base_virt != 0);
        /* DSP Virtual address */
        ul_tlb_base_virt = dev_context->sh_s.seg0_da;
        DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
        ul_shm_offset_virt =
            ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
        /* Kernel logical address */
        ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;

        DBC_ASSERT(ul_shm_base != 0);
        /* 2nd wd is used as sync field */
        dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
        /* Write a signature into the shm base + offset; this will
         * get cleared when the DSP program starts. */
        if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
                pr_err("%s: Illegal SM base\n", __func__);
                status = -EPERM;
        } else
                __raw_writel(0xffffffff, dw_sync_addr);

        if (!status) {
                resources = dev_context->resources;
                if (!resources)
                        status = -EPERM;

                /* Assert RST1, i.e. reset only the DSP megacell */
                if (!status) {
                        (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
                                        OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
                                        OMAP2_RM_RSTCTRL);
                        /* Mask address with 1K for compatibility */
                        __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
                                        OMAP343X_CTRL_REGADDR(
                                        OMAP343X_CONTROL_IVA2_BOOTADDR));
                        /*
                         * Set bootmode to self loop if dsp_debug flag is true
                         */
                        __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
                                        OMAP343X_CTRL_REGADDR(
                                        OMAP343X_CONTROL_IVA2_BOOTMOD));
                }
        }

        if (!status) {
                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
                mmu = dev_context->dsp_mmu;
                if (mmu)
                        iommu_put(mmu);
                mmu = iommu_get("iva2");
                if (IS_ERR(mmu)) {
                        dev_err(bridge, "iommu_get failed!\n");
                        dev_context->dsp_mmu = NULL;
                        status = PTR_ERR(mmu);
                }
        }
        if (!status) {
                dev_context->dsp_mmu = mmu;
                mmu->isr = mmu_fault_isr;
                sm_sg = &dev_context->sh_s;
                sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
                        sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
                if (IS_ERR_VALUE(sg0_da)) {
                        status = (int)sg0_da;
                        sg0_da = 0;
                }
        }
        if (!status) {
                sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
                        sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
                if (IS_ERR_VALUE(sg1_da)) {
                        status = (int)sg1_da;
                        sg1_da = 0;
                }
        }
        if (!status) {
                u32 da;
                for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
                        if (!tlb[tlb_i].ul_gpp_pa)
                                continue;

                        dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
                                " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
                                tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);

                        da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
                                tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
                                IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
                        if (IS_ERR_VALUE(da)) {
                                status = (int)da;
                                break;
                        }
                }
        }
        if (!status) {
                u32 da;
                l4_i = 0;
                while (l4_peripheral_table[l4_i].phys_addr) {
                        da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
                                dsp_virt_addr, l4_peripheral_table[l4_i].
                                phys_addr, PAGE_SIZE,
                                IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
                        if (IS_ERR_VALUE(da)) {
                                status = (int)da;
                                break;
                        }
                        l4_i++;
                }
        }

        /* Lock the above TLB entries and get the BIOS and load monitor timer
         * information */
        if (!status) {
                /* Enable the BIOS clock */
                (void)dev_get_symbol(dev_context->hdev_obj,
                                     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
                (void)dev_get_symbol(dev_context->hdev_obj,
                                     BRIDGEINIT_LOADMON_GPTIMER,
                                     &ul_load_monitor_timer);

                if (ul_load_monitor_timer != 0xFFFF) {
                        clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
                            ul_load_monitor_timer;
                        dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
                } else {
                        dev_dbg(bridge, "Not able to get the symbol for Load "
                                "Monitor Timer\n");
                }

                if (ul_bios_gp_timer != 0xFFFF) {
                        clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
                            ul_bios_gp_timer;
                        dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
                } else {
                        dev_dbg(bridge,
                                "Not able to get the symbol for BIOS Timer\n");
                }

                /* Set the DSP clock rate */
                (void)dev_get_symbol(dev_context->hdev_obj,
                                     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
                /* Set Autoidle Mode for IVA2 PLL */
                (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
                                OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

                if (ul_dsp_clk_addr != 0) {
                        /* Get the clock rate */
                        ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
                        dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x\n",
                                __func__, ul_dsp_clk_rate);
                        (void)bridge_brd_write(dev_context,
                                               (u8 *) &ul_dsp_clk_rate,
                                               ul_dsp_clk_addr, sizeof(u32), 0);
                }
                /*
                 * Enable Mailbox events and also drain any pending
                 * stale messages.
                 */
                dev_context->mbox = omap_mbox_get("dsp");
                if (IS_ERR(dev_context->mbox)) {
                        dev_context->mbox = NULL;
                        pr_err("%s: Failed to get dsp mailbox handle\n",
                                                                __func__);
                        status = -EPERM;
                }

        }
        if (!status) {
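                /* Register the mailbox receive callback; io_mbox_msg handles
                 * messages coming back from the DSP. */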
                dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;

/*PM_IVA2GRPSEL_PER = 0xC0;*/
                temp = readl(resources->dw_per_pm_base + 0xA8);
                temp = (temp & 0xFFFFFF30) | 0xC0;
                writel(temp, resources->dw_per_pm_base + 0xA8);

/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
                temp = readl(resources->dw_per_pm_base + 0xA4);
                temp = (temp & 0xFFFFFF3F);
                writel(temp, resources->dw_per_pm_base + 0xA4);
/*CM_SLEEPDEP_PER |= 0x04; */
                temp = readl(resources->dw_per_base + 0x44);
                temp = (temp & 0xFFFFFFFB) | 0x04;
                writel(temp, resources->dw_per_base + 0x44);

/*CM_CLKSTCTRL_IVA2 = 0x00000003 - To Allow automatic transitions */
                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

                /* Let DSP go */
                dev_dbg(bridge, "%s Unreset\n", __func__);
                /* release the RST1, DSP starts executing now .. */
                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

                dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
                dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
                if (dsp_debug)
                        while (__raw_readw(dw_sync_addr))
                                ;

                /* Wait for DSP to clear word in shared memory */
                /* Read the Location */
                if (!wait_for_start(dev_context, dw_sync_addr))
                        status = -ETIMEDOUT;

                /* Start wdt */
                dsp_wdt_sm_set((void *)ul_shm_base);
                dsp_wdt_enable(true);

                status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
                if (hio_mgr) {
                        io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
                        /* Write the synchronization bit to indicate the
                         * completion of OPP table update to DSP
                         */
                        __raw_writel(0xCAFECAFE, dw_sync_addr);

                        /* update board state */
                        dev_context->dw_brd_state = BRD_RUNNING;
                        return 0;
                } else {
                        dev_context->dw_brd_state = BRD_UNKNOWN;
                }
        }

        while (tlb_i--) {
                if (!tlb[tlb_i].ul_gpp_pa)
                        continue;
                iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
        }
        while (l4_i--)
                iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
        if (sg0_da)
                iommu_kunmap(mmu, sg0_da);
        if (sg1_da)
                iommu_kunmap(mmu, sg1_da);
        return status;
}

/*
 *  ======== bridge_brd_stop ========
 *  purpose:
 *      Puts DSP in self loop.
 *
 *  Preconditions :
 *  a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;
        u32 dsp_pwr_state;
        int i;
        struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
        struct omap_dsp_platform_data *pdata =
                omap_dspbridge_dev->dev.platform_data;

        if (dev_context->dw_brd_state == BRD_STOPPED)
                return status;

        /* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
         * mode before turning off the clocks. This ensures that there are no
         * pending L3 or other transactions from IVA2 */
        dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
                                        OMAP_POWERSTATEST_MASK;
        if (dsp_pwr_state != PWRDM_POWER_OFF) {
                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
                sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
                mdelay(10);

                /* IVA2 is not in OFF state */
                /* Set PM_PWSTCTRL_IVA2 to OFF */
                (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
                        PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
                /* Set the SW supervised state transition for Sleep */
                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
        }
        udelay(10);
        /* Release the Ext Base virtual Address as the next DSP Program
         * may have a different load address */
        if (dev_context->dw_dsp_ext_base_addr)
                dev_context->dw_dsp_ext_base_addr = 0;

        dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */

        dsp_wdt_enable(false);

        /* Reset DSP */
        (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
                OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        /* Disable the mailbox interrupts */
        if (dev_context->mbox) {
                omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
                omap_mbox_put(dev_context->mbox);
                dev_context->mbox = NULL;
        }
        if (dev_context->dsp_mmu) {
                pr_err("Proc stop mmu if statement\n");
                for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
                        if (!tlb[i].ul_gpp_pa)
                                continue;
                        iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
                }
                i = 0;
                while (l4_peripheral_table[i].phys_addr) {
                        iommu_kunmap(dev_context->dsp_mmu,
                                l4_peripheral_table[i].dsp_virt_addr);
                        i++;
                }
                iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
                iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
                iommu_put(dev_context->dsp_mmu);
                dev_context->dsp_mmu = NULL;
        }
        /* Reset IVA IOMMU */
        (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
                OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        dsp_clock_disable_all(dev_context->dsp_per_clks);
        dsp_clk_disable(DSP_CLK_IVA2);

        return status;
}

/*
 *  ======== bridge_brd_status ========
 *      Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
                                    int *board_state)
{
        struct bridge_dev_context *dev_context = dev_ctxt;
        *board_state = dev_context->dw_brd_state;
        return 0;
}

/*
 *  ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
                                   u8 *host_buff, u32 dsp_addr,
                                   u32 ul_num_bytes, u32 mem_type)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;

        if (dsp_addr < dev_context->dw_dsp_start_add) {
                status = -EPERM;
                return status;
        }
        if ((dsp_addr - dev_context->dw_dsp_start_add) <
            dev_context->dw_internal_size) {
                status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
                                        ul_num_bytes, mem_type);
        } else {
                status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
                                            ul_num_bytes, mem_type, false);
        }

        return status;
}

/*
 *  ======== bridge_dev_create ========
 *      Creates a driver object. Puts DSP in self loop.
 */
static int bridge_dev_create(struct bridge_dev_context
                                        **dev_cntxt,
                                        struct dev_object *hdev_obj,
                                        struct cfg_hostres *config_param)
{
        int status = 0;
        struct bridge_dev_context *dev_context = NULL;
        s32 entry_ndx;
        struct cfg_hostres *resources = config_param;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        /* Allocate and initialize a data structure to contain the bridge driver
         * state, which becomes the context for later calls into this driver */
        dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
        if (!dev_context) {
                status = -ENOMEM;
                goto func_end;
        }

        dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
        dev_context->dw_self_loop = (u32) NULL;
        dev_context->dsp_per_clks = 0;
        dev_context->dw_internal_size = OMAP_DSP_SIZE;
        /*  Clear dev context MMU table entries.
         *  These get set on bridge_io_on_loaded() call after program loaded. */
        for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
                dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
                    dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
        }
        dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS(
                        (void *)(config_param->dw_mem_base[3]),
                        config_param->dw_mem_length[3]);
        if (!dev_context->dw_dsp_base_addr)
                status = -EPERM;

        if (!status) {
                dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
                dev_context->hdev_obj = hdev_obj;
                /* Store current board state. */
                dev_context->dw_brd_state = BRD_UNKNOWN;
                dev_context->resources = resources;
                dsp_clk_enable(DSP_CLK_IVA2);
                bridge_brd_stop(dev_context);
                /* Return ptr to our device state to the DSP API for storage */
                *dev_cntxt = dev_context;
        } else {
                kfree(dev_context);
        }
func_end:
        return status;
}

/*
 *  ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
                                  u32 dw_cmd, void *pargs)
{
        int status = 0;
        struct bridge_ioctl_extproc *pa_ext_proc =
                                        (struct bridge_ioctl_extproc *)pargs;
        s32 ndx;

        switch (dw_cmd) {
        case BRDIOCTL_CHNLREAD:
                break;
        case BRDIOCTL_CHNLWRITE:
                break;
        case BRDIOCTL_SETMMUCONFIG:
                /* store away dsp-mmu setup values for later use */
                for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
                        dev_context->atlb_entry[ndx] = *pa_ext_proc;
                break;
        case BRDIOCTL_DEEPSLEEP:
        case BRDIOCTL_EMERGENCYSLEEP:
                /* Currently only DSP Idle is supported. Need to update for
                 * later releases */
                status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
                break;
        case BRDIOCTL_WAKEUP:
                status = wake_dsp(dev_context, pargs);
                break;
        case BRDIOCTL_CLK_CTRL:
                status = 0;
                /* Looking For Baseport Fix for Clocks */
                status = dsp_peripheral_clk_ctrl(dev_context, pargs);
                break;
        case BRDIOCTL_PWR_HIBERNATE:
                status = handle_hibernation_from_dsp(dev_context);
                break;
        case BRDIOCTL_PRESCALE_NOTIFY:
                status = pre_scale_dsp(dev_context, pargs);
                break;
        case BRDIOCTL_POSTSCALE_NOTIFY:
                status = post_scale_dsp(dev_context, pargs);
                break;
        case BRDIOCTL_CONSTRAINT_REQUEST:
                status = handle_constraints_set(dev_context, pargs);
                break;
        default:
                status = -EPERM;
                break;
        }
        return status;
}

/*
 *  ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
        int status = 0;
        struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
            dev_ctxt;
        struct cfg_hostres *host_res;
        u32 shm_size;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        /* This should never happen */
        if (!dev_ctxt)
                return -EFAULT;

        /* first put the device to stop state */
        bridge_brd_stop(dev_context);

        if (dev_context->resources) {
                host_res = dev_context->resources;
                shm_size = drv_datap->shm_size;
                if (shm_size >= 0x10000) {
                        if ((host_res->dw_mem_base[1]) &&
                            (host_res->dw_mem_phys[1])) {
                                mem_free_phys_mem((void *)
                                                  host_res->dw_mem_base[1],
                                                  host_res->dw_mem_phys[1],
                                                  shm_size);
                        }
                } else {
                        dev_dbg(bridge, "%s: Error getting shm size "
                                "from registry: %x. Not calling "
                                "mem_free_phys_mem\n", __func__,
                                status);
                }
                host_res->dw_mem_base[1] = 0;
                host_res->dw_mem_phys[1] = 0;

                if (host_res->dw_mem_base[0])
                        iounmap((void *)host_res->dw_mem_base[0]);
                if (host_res->dw_mem_base[2])
                        iounmap((void *)host_res->dw_mem_base[2]);
                if (host_res->dw_mem_base[3])
                        iounmap((void *)host_res->dw_mem_base[3]);
                if (host_res->dw_mem_base[4])
                        iounmap((void *)host_res->dw_mem_base[4]);
                if (host_res->dw_dmmu_base)
                        iounmap(host_res->dw_dmmu_base);
                if (host_res->dw_per_base)
                        iounmap(host_res->dw_per_base);
                if (host_res->dw_per_pm_base)
                        iounmap((void *)host_res->dw_per_pm_base);
                if (host_res->dw_core_pm_base)
                        iounmap((void *)host_res->dw_core_pm_base);
                if (host_res->dw_sys_ctrl_base)
                        iounmap(host_res->dw_sys_ctrl_base);

                host_res->dw_mem_base[0] = (u32) NULL;
                host_res->dw_mem_base[2] = (u32) NULL;
                host_res->dw_mem_base[3] = (u32) NULL;
                host_res->dw_mem_base[4] = (u32) NULL;
                host_res->dw_dmmu_base = NULL;
                host_res->dw_sys_ctrl_base = NULL;

                kfree(host_res);
        }

        /* Free the driver's device context: */
        kfree(drv_datap->base_img);
        kfree(drv_datap);
        dev_set_drvdata(bridge, NULL);
        kfree((void *)dev_ctxt);
        return status;
}

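/*
 *  ======== bridge_brd_mem_copy ========
 *      Copies data between DSP addresses in BUFFERSIZE chunks, staging each
 *      chunk through a temporary host buffer.
 */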
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
                                   u32 dsp_dest_addr, u32 dsp_src_addr,
                                   u32 ul_num_bytes, u32 mem_type)
{
        int status = 0;
        u32 src_addr = dsp_src_addr;
        u32 dest_addr = dsp_dest_addr;
        u32 copy_bytes = 0;
        u32 total_bytes = ul_num_bytes;
        u8 host_buf[BUFFERSIZE];
        struct bridge_dev_context *dev_context = dev_ctxt;

        while (total_bytes > 0 && !status) {
                copy_bytes =
                    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
                /* Read from External memory */
                status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
                                           copy_bytes, mem_type);
                if (!status) {
                        if (dest_addr < (dev_context->dw_dsp_start_add +
                                         dev_context->dw_internal_size)) {
                                /* Write to Internal memory */
                                status = write_dsp_data(dev_ctxt, host_buf,
                                                        dest_addr, copy_bytes,
                                                        mem_type);
                        } else {
                                /* Write to External memory */
                                status =
                                    write_ext_dsp_data(dev_ctxt, host_buf,
                                                       dest_addr, copy_bytes,
                                                       mem_type, false);
                        }
                }
                total_bytes -= copy_bytes;
                src_addr += copy_bytes;
                dest_addr += copy_bytes;
        }
        return status;
}

/* Unlike bridge_brd_write, mem_write does not halt the DSP before writing */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
                                    u8 *host_buff, u32 dsp_addr,
                                    u32 ul_num_bytes, u32 mem_type)
{
        int status = 0;
        struct bridge_dev_context *dev_context = dev_ctxt;
        u32 ul_remain_bytes = 0;
        u32 ul_bytes = 0;

        ul_remain_bytes = ul_num_bytes;
        while (ul_remain_bytes > 0 && !status) {
                ul_bytes =
                    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
                if (dsp_addr < (dev_context->dw_dsp_start_add +
                                 dev_context->dw_internal_size)) {
                        status =
                            write_dsp_data(dev_ctxt, host_buff, dsp_addr,
                                           ul_bytes, mem_type);
                } else {
                        status = write_ext_dsp_data(dev_ctxt, host_buff,
                                                    dsp_addr, ul_bytes,
                                                    mem_type, true);
                }
                ul_remain_bytes -= ul_bytes;
                dsp_addr += ul_bytes;
                host_buff = host_buff + ul_bytes;
        }
        return status;
}

/*
 *  ======== user_va2_pa ========
 *  Purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to a physical address.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, address);
        if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
                pmd = pmd_offset(pgd, address);
                if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
                        ptep = pte_offset_map(pmd, address);
                        if (ptep) {
                                pte = *ptep;
                                if (pte_present(pte))
                                        return pte_val(pte) & PAGE_MASK;
                        }
                }
        }

        return 0;
}

/**
 * get_io_pages() - pin and get pages of an I/O user buffer.
 * @mm:         mm_struct pointer of the process.
 * @uva:        Virtual user space address.
 * @pages:      Number of pages to be pinned.
 * @usr_pgs:    struct page array pointer where the user pages will be stored
 *
 */
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
                                                struct page **usr_pgs)
{
        u32 pa;
        int i;
        struct page *pg;

        for (i = 0; i < pages; i++) {
                pa = user_va2_pa(mm, uva);

                if (!pfn_valid(__phys_to_pfn(pa)))
                        break;

                pg = PHYS_TO_PAGE(pa);
                usr_pgs[i] = pg;
                get_page(pg);
                /* advance to the next page of the buffer */
                uva += PAGE_SIZE;
        }
        return i;
}

/**
 * user_to_dsp_map() - maps a user buffer to a DSP virtual address
 * @mmu:        Pointer to iommu handle.
 * @uva:        Virtual user space address.
 * @da:         DSP address
 * @size:       Buffer size to map.
 * @usr_pgs:    struct page array pointer where the user pages will be stored
 *
 * This function maps a user space buffer to a DSP virtual address.
 *
 */
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
                                struct page **usr_pgs)
{
        int res, w = 0;
        unsigned pages;
        int i;
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct sg_table *sgt;
        struct scatterlist *sg;

        if (!size || !usr_pgs)
                return -EINVAL;

        pages = size / PG_SIZE4K;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, uva);
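        /* The buffer may span several VMAs; walk forward until a VMA covering
         * the end of the requested range is found. */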
        while (vma && (uva + size > vma->vm_end))
                vma = find_vma(mm, vma->vm_end + 1);

        if (!vma) {
                pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
                                                __func__, uva, size);
                up_read(&mm->mmap_sem);
                return -EINVAL;
        }
        if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
                w = 1;

        if (vma->vm_flags & VM_IO)
                i = get_io_pages(mm, uva, pages, usr_pgs);
        else
                i = get_user_pages(current, mm, uva, pages, w, 1,
                                                        usr_pgs, NULL);
        up_read(&mm->mmap_sem);

        if (i < 0)
                return i;

        if (i < pages) {
                res = -EFAULT;
                goto err_pages;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                res = -ENOMEM;
                goto err_pages;
        }

        res = sg_alloc_table(sgt, pages, GFP_KERNEL);

        if (res < 0)
                goto err_sg;

        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);

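        /* Map the page list into the DSP's virtual address space as
         * little-endian, 32-bit elements. */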
        da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);

        if (!IS_ERR_VALUE(da))
                return da;
        res = (int)da;

        sg_free_table(sgt);
err_sg:
        kfree(sgt);
        i = pages;
err_pages:
        while (i--)
                put_page(usr_pgs[i]);
        return res;
}

/**
 * user_to_dsp_unmap() - unmaps a DSP virtual buffer.
 * @mmu:        Pointer to iommu handle.
 * @da:         DSP address
 *
 * This function unmaps a user space buffer from its DSP virtual address.
 *
 */
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
        unsigned i;
        struct sg_table *sgt;
        struct scatterlist *sg;

        sgt = iommu_vunmap(mmu, da);
        if (!sgt)
                return -EFAULT;

        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                put_page(sg_page(sg));
        sg_free_table(sgt);
        kfree(sgt);

        return 0;
}

/*
 *  ======== wait_for_start ========
 *      Wait for the signal from the DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
        u16 timeout = TIHELEN_ACKTIMEOUT;

        /*  Wait for response from board */
        while (__raw_readw(dw_sync_addr) && --timeout)
                udelay(10);

        /*  If timed out: return false */
        if (!timeout) {
                pr_err("%s: Timed out waiting DSP to Start\n", __func__);
                return false;
        }
        return true;
}