3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
54 #if SEP_DRIVER_ARM_DEBUG_MODE
56 #define CRYS_SEP_ROM_length 0x4000
57 #define CRYS_SEP_ROM_start_address 0x8000C000UL
58 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
59 #define SEP_ROM_BANK_register 0x80008420UL
60 #define SEP_ROM_BANK_register_offset 0x8420UL
61 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64 * THESE 2 definitions are specific to the board - must be
65 * defined during integration
67 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * sep_load_rom_code - load the SEP ROM image into the device.
 *
 * Writes the CRYS_SEP_ROM image (from SEP_ROM_image.h) into the chip in
 * 4 banks of CRYS_SEP_ROM_length/4 words each, resets the SEP, then reads
 * GPR3 for the ROM boot status. Debug-build only (SEP_DRIVER_ARM_DEBUG_MODE).
 *
 * NOTE(review): several statements (loop closes, the polling loop, the
 * switch on the boot status, and the reg/error/warning declarations) are
 * not visible in this chunk — verify against the full source.
 * NOTE(review): the edbg below prints an integer constant with %p —
 * looks like a format mismatch; confirm.
 */
static void sep_load_rom_code(struct sep_device *sep)
	unsigned long i, k, j;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);

	/* copy the image bank by bank */
	for (i = 0; i < 4; i++) {
		/* select the ROM bank to receive this quarter of the image */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

	j = CRYS_SEP_ROM_length;

	/* reset the SEP so that it boots from the freshly loaded ROM */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish */
	reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);

	edbg("SEP Driver: ROM polling ended\n");

	/* fatal error - read error status from GPR0 */
	error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	edbg("SEP Driver: ROM polling case 1\n");
	/* Cold boot ended successfully */
	/* Warmboot ended successfully */
	/* ColdWarm boot ended successfully */
	/* Boot First Phase ended */
	warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	edbg("SEP Driver: ROM polling case %d\n", reg);
/* Stub for non-ARM-debug builds: no host-side ROM load is required. */
static void sep_load_rom_code(struct sep_device *sep)
{
}
139 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
143 /*----------------------------------------
145 -----------------------------------------*/
147 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
148 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
149 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
151 /*--------------------------------------------
153 --------------------------------------------*/
155 /* debug messages level */
157 module_param(sepDebug, int , 0);
158 MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
160 /* Keep this a single static object for now to keep the conversion easy */
162 static struct sep_device sep_instance;
163 static struct sep_device *sep_dev = &sep_instance;
mutex protecting access to the internals of the sep driver
168 static DEFINE_MUTEX(sep_mutex);
171 /* wait queue head (event) of the driver */
172 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
/*
 * sep_copy_cache_resident_to_area - stage firmware images in the RAR.
 *
 * This function copies the cache and resident firmware images from their
 * source (loaded via request_firmware) into destination memory which is
 * external to the Linux VM (the RAR region), laying the resident image
 * immediately after the cache image. The resulting bus addresses are
 * returned through the two output pointers.
 *
 * NOTE(review): the error-check/cleanup statements after each
 * request_firmware call, the local declarations (error, cache_addr,
 * resident_addr) and the return path are not visible in this chunk.
 */
static int sep_copy_cache_resident_to_area(struct sep_device *sep,
    unsigned long src_cache_addr,
    unsigned long cache_size_in_bytes,
    unsigned long src_resident_addr,
    unsigned long resident_size_in_bytes,
    unsigned long *dst_new_cache_addr_ptr,
    unsigned long *dst_new_resident_addr_ptr)
	const struct firmware *fw;

	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";

	/* cache image goes at the very start of the RAR region */
	edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
	edbg("SEP Driver:rar_physical is %08llx\n", (unsigned long long)sep->rar_bus);

	sep->rar_region_addr = (unsigned long) sep->rar_addr;

	sep->cache_bus = sep->rar_bus;
	sep->cache_addr = sep->rar_addr;

	/* load the cache firmware blob and copy it into the RAR */
	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	edbg("SEP Driver:cant request cache fw\n");

	edbg("SEP Driver:cache data loc is %p\n", (void *) fw->data);
	edbg("SEP Driver:cache data size is %08Zx\n", fw->size);

	memcpy(sep->cache_addr, (void *) fw->data, fw->size);

	sep->cache_size = fw->size;

	cache_addr = sep->cache_addr;

	release_firmware(fw);

	/* resident image is placed directly after the cache image */
	sep->resident_bus = sep->cache_bus + sep->cache_size;
	sep->resident_addr = sep->cache_addr + sep->cache_size;

	/* load the resident firmware blob and copy it after the cache */
	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	edbg("SEP Driver:cant request res fw\n");

	edbg("SEP Driver:res data loc is %p\n", (void *) fw->data);
	edbg("SEP Driver:res data size is %08Zx\n", fw->size);

	memcpy((void *) sep->resident_addr, (void *) fw->data, fw->size);

	sep->resident_size = fw->size;

	release_firmware(fw);

	resident_addr = sep->resident_addr;

	edbg("SEP Driver:resident_addr (physical )is %08llx\n", (unsigned long long)sep->resident_bus);
	edbg("SEP Driver:cache_addr (physical) is %08llx\n", (unsigned long long)sep->cache_bus);

	edbg("SEP Driver:resident_addr (logical )is %p\n", resident_addr);
	edbg("SEP Driver:cache_addr (logical) is %08llx\n", (unsigned long long)cache_addr);

	edbg("SEP Driver:resident_size is %08lx\n", sep->resident_size);
	edbg("SEP Driver:cache_size is %08llx\n", (unsigned long long)sep->cache_size);

	/* report the physical (bus) addresses back to the caller */
	*dst_new_cache_addr_ptr = sep->cache_bus;
	*dst_new_resident_addr_ptr = sep->resident_bus;
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 *
 * Allocate a shared buffer in host memory that can be used by both the
 * kernel and also the hardware interface via DMA.
 *
 * NOTE(review): the allocation result is stored in sep->shared_area, but
 * the NULL check reads sep->shared_addr and the value is then copied from
 * shared_addr back into shared_area — one of these assignments almost
 * certainly should target sep->shared_addr; confirm against
 * struct sep_device. The `size` parameter line, the error return and the
 * success return are not visible in this chunk.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep,
	/* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
	sep->shared_area = dma_alloc_coherent(&sep->pdev->dev, size,
	                                      &sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
	/* keep the legacy field name in sync with the new one */
	sep->shared_area = sep->shared_addr;
	/* set the physical address of the shared area */
	sep->shared_area_bus = sep->shared_bus;
	edbg("sep: shared_area %d bytes @%p (bus %08llx)\n",
	     size, sep->shared_addr, (unsigned long long)sep->shared_bus);
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 * @size: byte size originally passed to the allocation
 *
 * Free the shared area allocated to the security processor. The
 * processor must have finished with this and any final posted
 * writes cleared before we do so.
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
	/* release the coherent DMA buffer recorded at allocation time */
	dma_free_coherent(&sep->pdev->dev, size,
	                  sep->shared_area, sep->shared_area_bus);
/**
 * sep_shared_area_virt_to_bus - convert bus/virt addresses
 * @sep: security processor (provides the shared_addr/shared_bus base pair)
 *
 * Returns the bus (DMA) address inside the shared area corresponding
 * to the given virtual address, by offsetting from the area base.
 *
 * NOTE(review): the virt_address parameter line and the `return pa;`
 * statement are not visible in this chunk — confirm.
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
	edbg("sep: virt to phys p %08llx v %p\n", pa, virt_address);
/**
 * sep_shared_area_bus_to_virt - convert bus/virt addresses
 * @sep: security processor
 * @bus_address: bus address inside the shared area
 *
 * Returns the virtual address inside the shared area corresponding
 * to the given bus address (inverse of sep_shared_area_virt_to_bus).
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
                                         dma_addr_t bus_address)
	return sep->shared_addr + (bus_address - sep->shared_bus);
/*----------------------------------------------------------------------
  open function of the character driver - takes the global sep mutex
  (try-lock when the file was opened O_NDELAY, blocking otherwise),
  binds the single device instance to the file, and resets the data
  pool allocation counter for the new session.

  NOTE(review): the local `error` declaration, the else branch, the
  error-return path and the final return are not visible in this chunk.
------------------------------------------------------------------------*/
static int sep_open(struct inode *inode, struct file *filp)
	dbg("SEP Driver:--------> open start\n");

	/* check the blocking mode */
	if (filp->f_flags & O_NDELAY)
		error = mutex_trylock(&sep_mutex);
		/* blocking open: wait until the device is free */
		mutex_lock(&sep_mutex);

	/* check the error */
		edbg("SEP Driver: down_interruptible failed\n");

	/* Bind to the device, we only have one which makes it easy */
	filp->private_data = sep_dev;

	/* release data pool allocations */
	sep_dev->data_pool_bytes_allocated = 0;

	dbg("SEP Driver:<-------- open end\n");
377 /*------------------------------------------------------------
379 -------------------------------------------------------------*/
380 static int sep_release(struct inode *inode_ptr, struct file *filp)
382 struct sep_driver *sep = filp->private_data;
383 dbg("----------->SEP Driver: sep_release start\n");
385 #if 0 /*!SEP_DRIVER_POLLING_MODE */
387 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
388 /* release IRQ line */
389 free_irq(SEP_DIRVER_IRQ_NUM, sep);
392 /* unlock the sep mutex */
393 mutex_unlock(&sep_mutex);
394 dbg("SEP Driver:<-------- sep_release end\n");
401 /*---------------------------------------------------------------
402 map function - this functions maps the message shared area
403 -----------------------------------------------------------------*/
404 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
406 dma_addr_t phys_addr;
407 struct sep_device *sep = filp->private_data;
409 dbg("-------->SEP Driver: mmap start\n");
411 /* check that the size of the mapped range is as the size of the message
413 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
414 edbg("SEP Driver mmap requested size is more than allowed\n");
415 printk(KERN_WARNING "SEP Driver mmap requested size is more \
417 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
418 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
422 edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
424 /* get physical address */
425 phys_addr = sep->shared_area_bus;
427 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)phys_addr);
429 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
430 edbg("SEP Driver remap_page_range failed\n");
431 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
435 dbg("SEP Driver:<-------- mmap end\n");
/*-----------------------------------------------
  poll handler: in polling mode it spins on GPR2 until the reply
  counter catches up with send_ct; otherwise it sleeps on sep_event.
  Reports POLLOUT when a request from the SEP is pending and
  POLLIN when a reply arrived.

  NOTE(review): the `count` declaration, loop closes, the #else/#endif
  of SEP_DRIVER_POLLING_MODE, the request/reply branch condition and
  the `return mask;` are not visible in this chunk.
 *----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
	unsigned int mask = 0;
	unsigned long retVal = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");

#if SEP_DRIVER_POLLING_MODE
	/* busy-wait until GPR2 (reply counter) matches send_ct */
	while (sep->send_ct != (retVal & 0x7FFFFFFF)) {
		retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));

	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));

		retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retVal is %lu\n", retVal);
		/* check whether this is a sep reply or a request */
		edbg("SEP Driver: sep request in\n");
		/* request from SEP - userspace may now write the reply */
		mask |= POLLOUT | POLLWRNORM;
		edbg("SEP Driver: sep reply in\n");
		/* reply from SEP - userspace may now read it */
		mask |= POLLIN | POLLRDNORM;

	dbg("SEP Driver:<-------- poll exit\n");
/*
  calculates the current time and stores it at the predefined offset of
  the shared message area: a SEP_TIME_VAL_TOKEN marker word followed by
  the seconds value. Optionally reports the bus address of the stamp and
  the seconds value through the out-pointers.

  NOTE(review): the `struct timeval time` / `time_addr` declarations, the
  NULL checks guarding the out-pointer writes, and the return statement
  are not visible in this chunk.
*/
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
	/* address of time in the kernel */

	dbg("SEP Driver:--------> sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
	edbg("SEP Driver:time_addr is %p\n", time_addr);
	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* set the output parameters if needed */
	*address_ptr = sep_shared_area_virt_to_bus(sep, time_addr);

	*time_in_sec_ptr = time.tv_sec;

	dbg("SEP Driver:<-------- sep_set_time end\n");
/*
  This function raises an interrupt to the SEP signalling that the host
  has placed a new command in the shared area: it timestamps the message,
  dumps the first message words (debug) and writes GPR0.
*/
static void sep_send_command_handler(struct sep_device *sep)

	dbg("SEP Driver:--------> sep_send_command_handler start\n");
	/* stamp the message with the current time before handing it over */
	sep_set_time(sep, 0, 0);

	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

	/* send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
	dbg("SEP Driver:<-------- sep_send_command_handler end\n");
/*
  This function raises an interrupt to the SEP that signals that it has a
  new command from the HOST: dumps the message words (debug) and writes
  the send counter into GPR2.

  NOTE(review): the counter-update statements referenced by the comment
  below are not visible in this chunk.
*/
static void sep_send_reply_command_handler(struct sep_device *sep)

	dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");

	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters */

	dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
/*
  This function handles the allocate data pool memory request.
  It reserves command_args.num_bytes from the shared-area data pool,
  computes the pool offset and the bus address of the allocation, copies
  them back to user space, and only then commits the allocation counter.
  Therefore, the callers in user space can calculate the exact virtual
  address of this allocated memory.

  NOTE(review): the `unsigned long arg` parameter line, the `error`
  declaration, the error checks after the copy_{from,to}_user calls and
  the return statement are not visible in this chunk.
*/
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	struct sep_driver_alloc_t command_args;

	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));

	/* allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		/* FIXME: ENOMEM ? */

	/* set the virtual and physical address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
	command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;

	/* write the memory back to the user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));

	/* commit the allocation only after the copy-out succeeded */
	sep->data_pool_bytes_allocated += command_args.num_bytes;

	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
/*
  This function handles the write-into-allocated-data-pool command:
  reads the user buffer address, the data-pool kernel address and the
  byte count from the ioctl argument, validates that the target lies
  inside the pool, and copies the application data in.

  NOTE(review): the `error`/`va`/`virt_address` declarations, the
  error checks after each get_user, the range-check error path and the
  return statement are not visible in this chunk. The range check also
  ignores num_bytes (see the FIXME in the read handler below).
*/
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
	unsigned long app_in_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));

	/* get the virtual kernel address address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));

	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
		/* FIXME: EINVAL ? */

	/* copy the application data */
	error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);

	dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
/*
  This function handles the read-from-data-pool command: reads the
  destination user buffer address, the data-pool kernel address and the
  byte count from the ioctl argument, validates that the source lies
  inside the pool, and copies the data out to the application.

  NOTE(review): it reuses struct sep_driver_write_t for the read
  direction — presumably the layouts match; confirm against
  sep_driver_api.h. The declarations, per-call error checks and return
  statement are not visible in this chunk.
*/
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
	/* virtual address of dest application buffer */
	unsigned long app_out_address;
	/* virtual address of the data pool */
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));

	/* get the virtual kernel address address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));

	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* FIXME: These are incomplete all over the driver: what about + len
	   and when doing that also overflows */
	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {

	/* copy the application data */
	error = copy_to_user((void *) app_out_address, virt_address, num_bytes);

	dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
/*
  This function releases all the application virtual buffer physical
  pages that were previously locked: output pages are marked dirty
  (unless reserved) before release; input pages are released as-is.
  Finally the page array itself is freed.

  NOTE(review): the `count` declaration and the branch on dirtyFlag that
  selects between the two loops are not visible in this chunk.
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)

	for (count = 0; count < num_pages; count++) {
		/* the out array was written, therefore the data was changed */
		if (!PageReserved(page_array_ptr[count]))
			SetPageDirty(page_array_ptr[count]);
		page_cache_release(page_array_ptr[count]);

	/* free in pages - the data was only read, therefore no update was done */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array_ptr[count]);

	kfree(page_array_ptr);
/*
  This function walks the physical pages of a kernel virtual buffer and
  constructs a basic LLI array, where each entry holds the physical page
  address and the size of application data held in that physical page.
  The first entry covers the (possibly partial) head of the buffer, the
  middle entries are full pages, and the last entry covers the tail.

  NOTE(review): uses virt_to_phys(), so the buffer must be lowmem /
  physically contiguous per page — confirm callers. The error paths
  after kmalloc, the brace structure around the last-page handling, the
  block_size==0 correction branch and the return statement are not
  visible in this chunk.
*/
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	unsigned long data_size,
	unsigned long *num_pages_ptr,
	struct sep_lli_entry_t **lli_array_ptr,
	struct page ***page_array_ptr)
	/* the page of the end address of the buffer */
	unsigned long end_page;
	/* the page of the start address of the buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct sep_lli_entry_t *lli_array;
	/* next kernel address to map */
	unsigned long next_kernel_address;

	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

	/* set start and end pages and num pages */
	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = kernel_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lx\n", start_page);
	edbg("SEP Driver: end_page is %lx\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	edbg("SEP Driver: kmalloc for lli_array failed\n");

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
		/* first entry covers only the head fragment of the page */
		lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* advance the address to the start of the next page */
	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

	/* go from the second page to the prev before last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
		next_kernel_address += PAGE_SIZE;

	/* if more than 1 page locked - then update for the last page size needed */
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

		/* set the size of the last page */
		lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
			dbg("data_size is %lu\n", data_size);

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;

	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
/*
  This function locks all the physical pages of an application (user
  space) virtual buffer via get_user_pages and constructs a basic LLI
  array, where each entry holds the physical page address and the size
  of application data held in that physical page. Pages are
  cache-flushed before use; on partial lock failure the already-locked
  pages are released.

  NOTE(review): the `result`/`count` declarations, error-path bodies,
  the brace structure around the last-page handling and the return
  statement are not visible in this chunk.
*/
static int sep_lock_user_pages(struct sep_device *sep,
	unsigned long app_virt_addr,
	unsigned long data_size,
	unsigned long *num_pages_ptr,
	struct sep_lli_entry_t **lli_array_ptr,
	struct page ***page_array_ptr)
	/* the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct page **page_array;
	struct sep_lli_entry_t *lli_array;

	dbg("SEP Driver:--------> sep_lock_user_pages start\n");

	/* set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lu\n", start_page);
	edbg("SEP Driver: end_page is %lu\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	/* allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	edbg("SEP Driver: kmalloc for page_array failed\n");

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	edbg("SEP Driver: kmalloc for lli_array failed\n");

		goto end_function_with_error1;

	/* convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
	up_read(&current->mm->mmap_sem);

	/* check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dbg("SEP Driver: not all pages locked by get_user_pages\n");

		goto end_function_with_error2;

	/* flush the cache so the device sees the user's data */
	for (count = 0; count < num_pages; count++)
		flush_dcache_page(page_array[count]);

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
		/* first entry covers only the head fragment of the page */
		lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* go from the second page to the prev before last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* if more than 1 page locked - then update for the last page size needed */
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);

		/* set the size of the last page */
		lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", app_virt_addr);
			dbg("data_size is %lu\n", data_size);

		edbg("lli_array[%lu].physical_address is %08lx, \
			lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = page_array;

end_function_with_error2:
	/* release the pages locked above */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array[count]);

end_function_with_error1:

	dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
974 this function calculates the size of data that can be inserted into the lli
975 table from this array the condition is that either the table is full
976 (all etnries are entered), or there are no more entries in the lli array
978 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
980 unsigned long table_data_size = 0;
981 unsigned long counter;
983 /* calculate the data in the out lli table if till we fill the whole
984 table or till the data has ended */
985 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
986 table_data_size += lli_in_array_ptr[counter].block_size;
987 return table_data_size;
/*
  This function builds one LLI table from the lli_array according to the
  given size of data: entries are copied from the array into the table
  until table_data_size bytes are covered; a partially consumed array
  entry is split (its address/size adjusted) so the remainder can go
  into the next table. The table is terminated with an info entry
  (0xffffffff / 0).

  NOTE(review): the initialization and increments of array_counter, the
  loop/branch closing braces and the lli_table_ptr advancing statements
  are not visible in this chunk.
*/
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;

	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);

		/* advance to the next entry in the lli_array */

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */

	/* set the info entry to default */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
/*
  This function walks the chain of previously created LLI tables and
  prints them for debugging: each table's entries are dumped, then the
  trailing info entry is decoded (block_size packs the next table's data
  size in bits 0..23 and its entry count in bits 24..31, with
  physical_address pointing at the next table; 0xffffffff terminates).

  NOTE(review): the `table_count` initialization, loop braces, the
  pointer advance to the info entry and the table_count increment are
  not visible in this chunk.
*/
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);

		/* point to the info entry */
		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* decode the info entry: next table's size, entry count, address */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* translate the bus address of the next table back to virtual */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_bus_to_virt(sep, (unsigned long) lli_table_ptr);

	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1107 This function prepares only the input DMA table for synchronous symmetric
/*
 * sep_prepare_input_dma_table - build the input-side LLI table chain for
 * a synchronous symmetric (AES/DES) operation.
 * @sep: device context
 * @app_virt_addr: caller's buffer address (user or kernel virtual,
 *                 selected by @isKernelVirtualAddress)
 * @data_size: number of bytes to process
 * @block_size: cipher block size; each table describes a multiple of it
 * @lli_table_ptr: out - bus address of the first LLI table
 * @num_entries_ptr: out - number of entries in the first table
 * @table_data_size_ptr: out - data size covered by the first table
 * @isKernelVirtualAddress: true when @app_virt_addr is a kernel address
 *
 * Fix (review): the calls to sep_build_lli_table() contained the
 * mojibake token "¤t_entry" - the byte sequence "&curr" of
 * "&current_entry" was eaten as the HTML entity for the currency sign.
 * Restored to "&current_entry".
 * NOTE(review): interior lines (braces, error checks, current_entry
 * init, return) are missing from this extraction.
 */
1110 static int sep_prepare_input_dma_table(struct sep_device *sep,
1111 unsigned long app_virt_addr,
1112 unsigned long data_size,
1113 unsigned long block_size,
1114 unsigned long *lli_table_ptr,
1115 unsigned long *num_entries_ptr,
1116 unsigned long *table_data_size_ptr,
1117 bool isKernelVirtualAddress)
1119 /* pointer to the info entry of the table - the last entry */
1120 struct sep_lli_entry_t *info_entry_ptr;
1121 /* array of pointers to page */
1122 struct sep_lli_entry_t *lli_array_ptr;
1123 /* points to the first entry to be processed in the lli_in_array */
1124 unsigned long current_entry;
1125 /* num entries in the virtual buffer */
1126 unsigned long sep_lli_entries;
1127 /* lli table pointer */
1128 struct sep_lli_entry_t *in_lli_table_ptr;
1129 /* the total data in one table */
1130 unsigned long table_data_size;
1131 /* number of entries in lli table */
1132 unsigned long num_entries_in_table;
1133 /* next table address */
1134 void *lli_table_alloc_addr;
1135 unsigned long result;
1137 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1139 edbg("SEP Driver:data_size is %lu\n", data_size);
1140 edbg("SEP Driver:block_size is %lu\n", block_size);
1142 /* initialize the pages pointers */
1143 sep->in_page_array = 0;
1144 sep->in_num_pages = 0;
1146 if (data_size == 0) {
1147 /* special case - create a 2-entry table describing zero data */
1148 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1149 /* FIXME: Should the entry below not be for _bus */
1150 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1151 in_lli_table_ptr->block_size = 0;
1154 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1155 in_lli_table_ptr->block_size = 0;
1157 *lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1158 *num_entries_ptr = 2;
1159 *table_data_size_ptr = 0;
1164 /* check if the pages are in Kernel Virtual Address layout */
1165 if (isKernelVirtualAddress == true)
1166 /* lock the pages of the kernel buffer and translate them to pages */
1167 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1169 /* lock the pages of the user buffer and translate them to pages */
1170 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1175 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1179 sep_lli_entries = sep->in_num_pages;
1181 /* initiate to point after the message area */
1182 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1184 /* loop till all the entries in in array are not processed */
1185 while (current_entry < sep_lli_entries) {
1186 /* set the new input and output tables */
1187 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1189 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1191 /* calculate the maximum size of data for input table */
1192 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1194 /* now round the table size down to a multiple of the block size */
1195 table_data_size = (table_data_size / block_size) * block_size;
1197 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1199 /* construct input lli table */
1200 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1202 if (info_entry_ptr == 0) {
1203 /* first table: report its bus address and geometry to the caller */
1204 *lli_table_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1205 *num_entries_ptr = num_entries_in_table;
1206 *table_data_size_ptr = table_data_size;
1208 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1210 /* otherwise link the previous table's info entry to this table */
1211 info_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1212 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1215 /* save the pointer to the info entry of the current tables */
1216 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1219 /* print input tables */
1220 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1221 sep_shared_area_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1223 /* free the array of the pages */
1224 kfree(lli_array_ptr);
1226 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1232 This function creates the input and output dma tables for
1233 symmetric operations (AES/DES) according to the block size from LLI arays
/*
 * sep_construct_dma_tables_from_lli - build matched input/output LLI
 * table chains for symmetric operations (AES/DES) from two LLI arrays.
 * @sep: device context
 * @lli_in_array / @sep_in_lli_entries: input LLI array and its length
 * @lli_out_array / @sep_out_lli_entries: output LLI array and its length
 * @block_size: cipher block size; every table covers a multiple of it
 * @lli_table_in_ptr / @lli_table_out_ptr: out - bus addresses of the
 *         first input/output tables
 * @in_num_entries_ptr / @out_num_entries_ptr: out - entry counts of the
 *         first tables
 * @table_data_size_ptr: out - data size covered by the first table pair
 *
 * Each iteration sizes a table pair by the smaller of the two sides,
 * rounded down to the block size, so input and output tables always
 * describe the same amount of data.
 *
 * Fix (review): both sep_build_lli_table() calls contained the mojibake
 * tokens "¤t_in_entry" / "¤t_out_entry" - "&curr" of
 * "&current_in_entry" / "&current_out_entry" was eaten as the HTML
 * entity for the currency sign.  Restored.
 * NOTE(review): interior lines (braces, return) are missing from this
 * extraction.
 */
1235 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1236 struct sep_lli_entry_t *lli_in_array,
1237 unsigned long sep_in_lli_entries,
1238 struct sep_lli_entry_t *lli_out_array,
1239 unsigned long sep_out_lli_entries,
1240 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1242 /* points to the area where next lli table can be allocated: keep void *
1243 as there is pointer scaling to fix otherwise */
1244 void *lli_table_alloc_addr;
1245 /* input lli table */
1246 struct sep_lli_entry_t *in_lli_table_ptr;
1247 /* output lli table */
1248 struct sep_lli_entry_t *out_lli_table_ptr;
1249 /* pointer to the info entry of the table - the last entry */
1250 struct sep_lli_entry_t *info_in_entry_ptr;
1251 /* pointer to the info entry of the table - the last entry */
1252 struct sep_lli_entry_t *info_out_entry_ptr;
1253 /* points to the first entry to be processed in the lli_in_array */
1254 unsigned long current_in_entry;
1255 /* points to the first entry to be processed in the lli_out_array */
1256 unsigned long current_out_entry;
1257 /* max size of the input table */
1258 unsigned long in_table_data_size;
1259 /* max size of the output table */
1260 unsigned long out_table_data_size;
1261 /* flag that signifies if this is the first table pair built from the arrays */
1262 unsigned long first_table_flag;
1263 /* the data size that should be in table */
1264 unsigned long table_data_size;
1265 /* number of entries in the input table */
1266 unsigned long num_entries_in_table;
1267 /* number of entries in the output table */
1268 unsigned long num_entries_out_table;
1270 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1272 /* initiate to point after the message area */
1273 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1275 current_in_entry = 0;
1276 current_out_entry = 0;
1277 first_table_flag = 1;
1278 info_in_entry_ptr = 0;
1279 info_out_entry_ptr = 0;
1281 /* loop till all the entries in in array are not processed */
1282 while (current_in_entry < sep_in_lli_entries) {
1283 /* set the new input and output tables */
1284 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1286 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1288 /* set the first output tables */
1289 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1291 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1293 /* calculate the maximum size of data for input table */
1294 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1296 /* calculate the maximum size of data for output table */
1297 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1299 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1300 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1302 /* take the smaller of the two sides */
1303 table_data_size = in_table_data_size;
1304 if (table_data_size > out_table_data_size)
1305 table_data_size = out_table_data_size;
1307 /* now round the table size down to a multiple of the block size */
1308 table_data_size = (table_data_size / block_size) * block_size;
1310 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1312 /* construct input lli table */
1313 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1315 /* construct output lli table */
1316 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1318 /* if info entry is null - this is the first table built */
1319 if (info_in_entry_ptr == 0) {
1320 /* set the output parameters to physical addresses */
1321 *lli_table_in_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1322 *in_num_entries_ptr = num_entries_in_table;
1323 *lli_table_out_ptr = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1324 *out_num_entries_ptr = num_entries_out_table;
1325 *table_data_size_ptr = table_data_size;
1327 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1328 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1330 /* update the info entry of the previous in table */
1331 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1332 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1334 /* update the info entry of the previous out table */
1335 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1336 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1339 /* save the pointer to the info entry of the current tables */
1340 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1341 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1343 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1344 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1345 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1348 /* print input tables */
1349 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1350 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1351 /* print output tables */
1352 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1353 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1354 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1360 This function builds input and output DMA tables for synchronous
1361 symmetric operations (AES, DES). It also checks that each table
1362 is of the modular block size
/*
 * sep_prepare_input_output_dma_table - lock the caller's input and
 * output buffers and build matched LLI table chains for both.
 * @sep: device context
 * @app_virt_in_addr / @app_virt_out_addr: caller's buffers (user or
 *        kernel virtual, selected by @isKernelVirtualAddress)
 * @data_size: number of bytes to process
 * @block_size: cipher block size
 * @lli_table_in_ptr / @lli_table_out_ptr: out - bus addresses of the
 *        first input/output tables
 * @in_num_entries_ptr / @out_num_entries_ptr: out - first-table entry
 *        counts
 * @table_data_size_ptr: out - data size of the first table pair
 * @isKernelVirtualAddress: true for kernel-virtual buffers
 *
 * Cleanup uses the goto-label ladder: error2 frees the output LLI
 * array, error1 frees the input LLI array; the success path falls
 * through both so the arrays are always released.
 * NOTE(review): interior lines (braces, error checks, declarations of
 * result, return) are missing from this extraction.
 */
1364 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1365 unsigned long app_virt_in_addr,
1366 unsigned long app_virt_out_addr,
1367 unsigned long data_size,
1368 unsigned long block_size,
1369 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1371 /* array of pointers of page */
1372 struct sep_lli_entry_t *lli_in_array;
1373 /* array of pointers of page */
1374 struct sep_lli_entry_t *lli_out_array;
1377 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1379 /* initialize the pages pointers */
1380 sep->in_page_array = 0;
1381 sep->out_page_array = 0;
1383 /* check if the pages are in Kernel Virtual Address layout */
1384 if (isKernelVirtualAddress == true) {
1385 /* lock the pages of the kernel buffer and translate them to pages */
1386 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1388 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1392 /* lock the pages of the user buffer and translate them to pages */
1393 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1395 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* same procedure for the output buffer */
1400 if (isKernelVirtualAddress == true) {
1401 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1403 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1404 goto end_function_with_error1;
1407 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1409 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1410 goto end_function_with_error1;
1413 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1414 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1415 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1418 /* call the function that creates tables from the lli arrays */
1419 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1421 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1422 goto end_function_with_error2;
1425 /* fall through - free the lli entry arrays */
1426 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1427 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1428 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1429 end_function_with_error2:
1430 kfree(lli_out_array);
1431 end_function_with_error1:
1432 kfree(lli_in_array);
1434 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1440 this function handles the request for creation of the DMA table
1441 for the synchronic symmetric operations (AES,DES)
/*
 * sep_create_sync_dma_tables_handler - ioctl handler that builds the
 * DMA tables for a synchronous symmetric operation (AES/DES).
 * @sep: device context
 * (second parameter, the userspace @arg pointer, is on a line missing
 * from this extraction)
 *
 * Copies a sep_driver_build_sync_table_t from userspace, builds either
 * an input-only table chain or an input+output pair (depending on
 * whether app_out_address is non-zero), then copies the filled-in
 * command struct back to userspace.
 */
1443 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1447 /* command arguments */
1448 struct sep_driver_build_sync_table_t command_args;
1450 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1452 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1456 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1457 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1458 edbg("data_size is %lu\n", command_args.data_in_size);
1459 edbg("block_size is %lu\n", command_args.block_size);
1461 /* check if we need to build only input table or input/output */
1462 if (command_args.app_out_address)
1463 /* prepare input and output tables */
1464 error = sep_prepare_input_output_dma_table(sep,
1465 command_args.app_in_address,
1466 command_args.app_out_address,
1467 command_args.data_in_size,
1468 command_args.block_size,
1469 &command_args.in_table_address,
1470 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1472 /* prepare input tables */
1473 error = sep_prepare_input_dma_table(sep,
1474 command_args.app_in_address,
1475 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* return the table addresses/sizes to the caller */
1480 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
1481 /* FIXME: wrong error returned ! */
1483 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1488 this function handles the request for freeing the dma table for synchronous actions
/*
 * sep_free_dma_table_data_handler - release the page arrays locked for
 * a synchronous DMA operation and reset the bookkeeping in @sep.
 * @sep: device context holding in/out page arrays and page counts
 *
 * The third argument to sep_free_dma_pages() differs between input (0)
 * and output (1) pages - presumably a "dirty" flag so output pages are
 * marked dirty before release; confirm against sep_free_dma_pages().
 */
1490 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1492 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1494 /* free input pages array */
1495 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1497 /* free output pages array if needed */
1498 if (sep->out_page_array)
1499 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1501 /* reset all the values */
1502 sep->in_page_array = 0;
1503 sep->out_page_array = 0;
1504 sep->in_num_pages = 0;
1505 sep->out_num_pages = 0;
1506 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1511 this function find a space for the new flow dma table
/*
 * sep_find_free_flow_dma_table_space - find a free slot for a new flow
 * DMA table inside the shared-area flow-tables region.
 * @sep: device context providing the shared area base
 * @table_address_ptr: out - pointer to the start of the free slot
 *
 * A slot is considered free when the first word of the table (its id
 * field) is zero after masking off the top bit.  Scans in fixed-size
 * strides of table_size_in_words.
 */
1513 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1514 unsigned long **table_address_ptr)
1517 /* pointer to the id field of the flow dma table */
1518 unsigned long *start_table_ptr;
1519 /* Do not make start_addr unsigned long * unless fixing the offset
1521 void *flow_dma_area_start_addr;
1522 unsigned long *flow_dma_area_end_addr;
1523 /* maximum table size in words */
1524 unsigned long table_size_in_words;
1526 /* find the start address of the flow DMA table area */
1527 flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1529 /* set end address of the flow table area */
1530 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1532 /* table size in words: entries plus the 2 header words (page count + page array) */
1533 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1535 /* set the pointer to the start address of DMA area */
1536 start_table_ptr = flow_dma_area_start_addr;
1538 /* find the space for the next table */
1539 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1540 start_table_ptr += table_size_in_words;
1542 /* check if we reached the end of the flow tables area */
1543 if (start_table_ptr >= flow_dma_area_end_addr)
1546 *table_address_ptr = start_table_ptr;
1552 This function creates one DMA table for flow and returns its data,
1553 and pointer to its info entry
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA table from
 * one virtual buffer and report its data and info-entry pointer.
 * @sep: device context
 * @virt_buff_addr / @virt_buff_size: buffer to describe (user or
 *        kernel virtual, selected by @isKernelVirtualAddress)
 * @table_data: out - physical address + packed (entries|size) of table
 * @info_entry_ptr: out - pointer to the table's trailing info entry
 * @flow_data_ptr: flow context (not referenced in the lines visible
 *        here - confirm usage against the full file)
 * @isKernelVirtualAddress: true for a kernel-virtual buffer
 *
 * Table layout in the flow area: word 0 = number of pages (marks the
 * slot taken), word 1 = pointer to the struct page array, then the LLI
 * entries, terminated by an info entry (0xffffffff / 0).
 */
1555 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1556 unsigned long virt_buff_addr,
1557 unsigned long virt_buff_size,
1558 struct sep_lli_entry_t *table_data,
1559 struct sep_lli_entry_t **info_entry_ptr,
1560 struct sep_flow_context_t *flow_data_ptr,
1561 bool isKernelVirtualAddress)
1564 /* the range in pages */
1565 unsigned long lli_array_size;
1566 struct sep_lli_entry_t *lli_array;
1567 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1568 unsigned long *start_dma_table_ptr;
1569 /* total table data counter */
1570 unsigned long dma_table_data_count;
1571 /* pointer that will keep the pointer to the pages of the virtual buffer */
1572 struct page **page_array_ptr;
1573 unsigned long entry_count;
1575 /* find the space for the new table */
1576 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1580 /* check if the pages are in Kernel Virtual Address layout */
1581 if (isKernelVirtualAddress == true)
1582 /* lock kernel buffer in the memory */
1583 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1585 /* lock user buffer in the memory */
1586 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1591 /* set the pointer to page array at the beginning of table - this table is
1592 now considered taken */
1593 *start_dma_table_ptr = lli_array_size;
1595 /* point to the place of the pages pointers of the table */
1596 start_dma_table_ptr++;
1598 /* set the pages pointer */
1599 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1601 /* set the pointer to the first entry */
1602 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1604 /* now create the entries for table */
1605 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1606 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1608 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1610 /* set the total data of a table */
1611 dma_table_data_count += lli_array[entry_count].block_size;
1613 flow_dma_table_entry_ptr++;
1616 /* set the physical address */
1617 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1619 /* pack entry count (incl. info entry) and total data size */
1620 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1622 /* set the info entry */
1623 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1624 flow_dma_table_entry_ptr->block_size = 0;
1626 /* set the pointer to info entry */
1627 *info_entry_ptr = flow_dma_table_entry_ptr;
1629 /* the array of the lli entries */
1638 This function creates a list of tables for flow and returns the data for
1639 the first and last tables of the list
/*
 * sep_prepare_flow_dma_tables - build a linked list of flow DMA tables,
 * one per user-supplied (address, size) pair, returning the data of the
 * first and last tables.
 * @sep: device context
 * @num_virtual_buffers: number of (address|size) pairs to consume
 * @first_buff_addr: userspace address of the first pair; advanced
 *        between get_user() reads (the advancing lines are missing
 *        from this extraction)
 * @flow_data_ptr: flow context passed through to the per-table builder
 * @first_table_data_ptr / @last_table_data_ptr: out - first/last
 *        table descriptors
 * @isKernelVirtualAddress: true when the buffers are kernel-virtual
 *
 * Each new table is chained by rewriting the previous table's info
 * entry with the interrupt flag bit plus the new table's packed
 * (entries|size) word.
 */
1641 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1642 unsigned long num_virtual_buffers,
1643 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1646 unsigned long virt_buff_addr;
1647 unsigned long virt_buff_size;
1648 struct sep_lli_entry_t table_data;
1649 struct sep_lli_entry_t *info_entry_ptr;
1650 struct sep_lli_entry_t *prev_info_entry_ptr;
1655 prev_info_entry_ptr = 0;
1657 /* init the first table to default */
1658 table_data.physical_address = 0xffffffff;
1659 first_table_data_ptr->physical_address = 0xffffffff;
1660 table_data.block_size = 0;
1662 for (i = 0; i < num_virtual_buffers; i++) {
1663 /* get the virtual buffer address */
1664 error = get_user(virt_buff_addr, &first_buff_addr);
1668 /* get the virtual buffer size */
1670 error = get_user(virt_buff_size, &first_buff_addr);
1674 /* advance the address to point to the next pair of address|size */
1677 /* now prepare the one flow LLI table from the data */
1678 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1683 /* if this is the first table - save it to return to the user
1685 *first_table_data_ptr = table_data;
1687 /* set the pointer to info entry */
1688 prev_info_entry_ptr = info_entry_ptr;
1690 /* not first table - the previous table info entry should
1692 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1694 /* set the pointer to info entry */
1695 prev_info_entry_ptr = info_entry_ptr;
1699 /* set the last table data */
1700 *last_table_data_ptr = table_data;
1706 this function goes over all the flow tables connected to the given
1707 table and deallocate them
/*
 * sep_deallocated_flow_tables - walk a chain of flow DMA tables and
 * release the locked pages behind each one.
 * @first_table_ptr: descriptor of the first table; its physical_address
 *        is used directly as a pointer (flow tables live in the shared
 *        area - see sep_prepare_one_flow_dma_table)
 *
 * Each table is preceded by two header words: table_ptr[-2] holds the
 * page count and table_ptr[-1] the struct page array pointer, both
 * written by sep_prepare_one_flow_dma_table().  The info entry of each
 * table links to the next; 0xffffffff terminates the chain.
 */
1709 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1712 unsigned long *table_ptr;
1713 /* end address of the flow dma area */
1714 unsigned long num_entries;
1715 unsigned long num_pages;
1716 struct page **pages_ptr;
1717 /* maximum table size in words */
1718 struct sep_lli_entry_t *info_entry_ptr;
1720 /* set the pointer to the first table */
1721 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1723 /* set the num of entries */
1724 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1725 & SEP_NUM_ENTRIES_MASK;
1727 /* go over all the connected tables */
1728 while (*table_ptr != 0xffffffff) {
1729 /* get number of pages */
1730 num_pages = *(table_ptr - 2);
1732 /* get the pointer to the pages */
1733 pages_ptr = (struct page **) (*(table_ptr - 1));
1735 /* free the pages (1 = mark dirty; output data was written by DMA) */
1736 sep_free_dma_pages(pages_ptr, num_pages, 1);
1738 /* go to the info entry */
1739 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the link to the next table in the chain */
1741 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1742 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1749 * sep_find_flow_context - find a flow
1750 * @sep: the SEP we are working with
1751 * @flow_id: flow identifier
1753 * Returns a pointer the matching flow, or NULL if the flow does not
/*
 * sep_find_flow_context - find the flow slot whose flow_id matches
 * @flow_id; returns a pointer to it (the "return NULL on miss" tail of
 * this function is on lines missing from this extraction).
 */
1757 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1758 unsigned long flow_id)
1762 * always search for flow with id default first - in case we
1763 * already started working on the flow there can be no situation
1764 * when 2 flows are with default flag
1766 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1767 if (sep->flows[count].flow_id == flow_id)
1768 return &sep->flows[count];
1775 this function handles the request to create the DMA tables for flow
/*
 * sep_create_flow_dma_tables_handler - ioctl handler that allocates a
 * free flow slot, builds its DMA table chain from userspace buffer
 * descriptors, and returns the first table's data to userspace.
 * @sep: device context
 * (the userspace @arg parameter is on a line missing from this
 * extraction)
 *
 * On any error after table creation, end_function_with_error tears the
 * chain down via sep_deallocated_flow_tables().
 */
1777 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1781 struct sep_driver_build_flow_table_t command_args;
1782 /* first table - output */
1783 struct sep_lli_entry_t first_table_data;
1784 /* dma table data */
1785 struct sep_lli_entry_t last_table_data;
1786 /* pointer to the info entry of the previous DMA table */
1787 struct sep_lli_entry_t *prev_info_entry_ptr;
1788 /* pointer to the flow data structure */
1789 struct sep_flow_context_t *flow_context_ptr;
1791 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1793 /* init variables */
1794 prev_info_entry_ptr = 0;
1795 first_table_data.physical_address = 0xffffffff;
1797 /* find the free structure for flow data */
1798 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1799 if (flow_context_ptr == NULL)
1802 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1806 /* create flow tables */
1807 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1809 goto end_function_with_error;
1811 /* check if flow is static */
1812 if (!command_args.flow_type)
1813 /* point the info entry of the last to the info entry of the first */
1814 last_table_data = first_table_data;
1816 /* set output params */
1817 command_args.first_table_addr = first_table_data.physical_address;
1818 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1819 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1821 /* send the parameters to user application */
1822 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1824 goto end_function_with_error;
1826 /* all the flow created - update the flow entry with temp id */
1827 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1829 /* set the processing tables data in the context */
1830 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1831 flow_context_ptr->input_tables_in_process = first_table_data;
1833 flow_context_ptr->output_tables_in_process = first_table_data;
1837 end_function_with_error:
1838 /* free the allocated tables */
1839 sep_deallocated_flow_tables(&first_table_data);
1841 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1846 this function handles add tables to flow
/*
 * sep_add_flow_tables_handler - ioctl handler that builds new flow DMA
 * tables and appends them to an existing flow's input or output chain.
 * @sep: device context
 * @arg: userspace pointer to a sep_driver_add_flow_table_t
 *
 * If the flow already has tables on the chosen side, the new chain is
 * linked in by overwriting the last table's info entry; otherwise the
 * new chain becomes the flow's first/last tables.  The first table's
 * descriptor is copied back to userspace.
 */
1848 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1851 unsigned long num_entries;
1852 struct sep_driver_add_flow_table_t command_args;
1853 struct sep_flow_context_t *flow_context_ptr;
1854 /* first dma table data */
1855 struct sep_lli_entry_t first_table_data;
1856 /* last dma table data */
1857 struct sep_lli_entry_t last_table_data;
1858 /* pointer to the info entry of the current DMA table */
1859 struct sep_lli_entry_t *info_entry_ptr;
1861 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1863 /* get input parameters */
1864 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1868 /* find the flow structure for the flow id */
1869 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1870 if (flow_context_ptr == NULL)
1873 /* prepare the flow dma tables */
1874 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1876 goto end_function_with_error;
1878 /* now check if there is already an existing add table for this flow */
1879 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1880 /* this buffer was for input buffers */
1881 if (flow_context_ptr->input_tables_flag) {
1882 /* add table already exists - add the new tables to the end
1884 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* locate the last table's info entry via its physical address */
1886 info_entry_ptr = (struct sep_lli_entry_t *)
1887 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1889 /* connect to list of tables */
1890 *info_entry_ptr = first_table_data;
1892 /* report the existing chain's first table back to the caller */
1893 first_table_data = flow_context_ptr->first_input_table;
1895 /* set the input flag */
1896 flow_context_ptr->input_tables_flag = 1;
1898 /* set the first table data */
1899 flow_context_ptr->first_input_table = first_table_data;
1901 /* set the last table data */
1902 flow_context_ptr->last_input_table = last_table_data;
1903 } else { /* this is output tables */
1905 /* this buffer is for output buffers */
1906 if (flow_context_ptr->output_tables_flag) {
1907 /* add table already exists - add the new tables to
1908 the end of the previous */
1909 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1911 info_entry_ptr = (struct sep_lli_entry_t *)
1912 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1914 /* connect to list of tables */
1915 *info_entry_ptr = first_table_data;
1917 /* report the existing chain's first table back to the caller */
1918 first_table_data = flow_context_ptr->first_output_table;
1920 /* set the output flag */
1921 flow_context_ptr->output_tables_flag = 1;
1923 /* set the first table data */
1924 flow_context_ptr->first_output_table = first_table_data;
1926 /* set the last table data */
1927 flow_context_ptr->last_output_table = last_table_data;
1930 /* set output params */
1931 command_args.first_table_addr = first_table_data.physical_address;
1932 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1933 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1935 /* send the parameters to user application */
1936 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1937 end_function_with_error:
1938 /* free the allocated tables */
1939 sep_deallocated_flow_tables(&first_table_data);
1941 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
/*
 * NOTE(review): the embedded original line numbers jump (1948 -> 1951,
 * 1954 -> 1956, ...), so braces, error checks and the return path are
 * missing from this excerpt. Comments describe only what is visible.
 */
1946 this function add the flow add message to the specific flow
/*
 * Copies a user-supplied "add tables" message into the flow context
 * selected by command_args.flow_id. The copy length is validated against
 * SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES before use. Return value and the
 * bodies of the error branches are not visible in this excerpt.
 */
1948 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1951 struct sep_driver_add_message_t command_args;
1952 struct sep_flow_context_t *flow_context_ptr;
1954 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
/* fetch the request descriptor from user space */
1956 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject oversized messages before touching the flow context buffer */
1961 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1966 /* find the flow context */
1967 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
/* unknown flow id — error branch body not visible in this excerpt */
1968 if (flow_context_ptr == NULL)
1971 /* copy the message into context */
1972 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1973 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1975 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1981 this function returns the physical and virtual addresses of the static pool
/*
 * Reports to user space the bus (physical) and kernel-virtual addresses of
 * the static area inside the shared region: both are computed as the shared
 * area base plus SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES. Result struct is
 * copied back with copy_to_user(); the return statement is not visible in
 * this excerpt.
 */
1983 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1986 struct sep_driver_static_pool_addr_t command_args;
1988 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1990 /*prepare the output parameters in the struct */
1991 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1992 command_args.virtual_static_address = (unsigned long)sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1994 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1996 /* send the parameters to user application */
1997 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1998 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2003 this address gets the offset of the physical address from the start
/*
 * Converts a caller-supplied bus (physical) address into an offset relative
 * to the start of the shared area (sep->shared_area_bus). Addresses below
 * the shared-area base are rejected (the error branch body is missing from
 * this excerpt). The computed offset is copied back to user space.
 */
2006 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2009 struct sep_driver_get_mapped_offset_t command_args;
2011 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2013 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
/* address must lie at or above the shared-area base to be mappable */
2017 if (command_args.physical_address < sep->shared_area_bus) {
2023 /*prepare the output parameters in the struct */
2024 command_args.offset = command_args.physical_address - sep->shared_area_bus;
2026 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2028 /* send the parameters to user application */
2029 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2031 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * Polls GPR3 for a startup message from the SEP firmware, then reads the
 * error status from GPR0 on the failure path.
 * NOTE(review): lines 2040, 2043, 2045-2047 and 2049-2052 are missing from
 * this excerpt — the poll loop construct around the GPR3 read (likely a
 * do/while) and the value check are not visible; confirm against the full
 * source before drawing conclusions about loop behavior.
 */
2039 static int sep_start_handler(struct sep_device *sep)
2041 unsigned long reg_val;
2042 unsigned long error = 0;
2044 dbg("SEP Driver:--------> sep_start_handler start\n");
2046 /* wait in polling for message from SEP */
2048 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2051 /* check the value */
2053 /* fatal error - read error status from GPRO */
2054 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2055 dbg("SEP Driver:<-------- sep_start_handler end\n");
2060 this function handles the request for SEP initialization
/*
 * Downloads an initialization message from user space into the SEP SRAM
 * word by word, signals the SEP via GPR0, then polls GPR3 for an
 * acknowledge and reports failure details read from the SW-monitor
 * register (0x8060) and GPR0.
 * NOTE(review): several lines are missing from this excerpt (e.g. 2102/2103
 * before the GPR3 read — presumably the 'do' of a do/while poll loop — and
 * the error checks after copy_from_user). Confirm against the full source.
 */
2062 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2064 unsigned long message_word;
2065 unsigned long *message_ptr;
2066 struct sep_driver_init_t command_args;
2067 unsigned long counter;
2068 unsigned long error;
2069 unsigned long reg_val;
2071 dbg("SEP Driver:--------> sep_init_handler start\n");
2074 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2076 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2081 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2082 /*sep_configure_dma_burst(); */
2084 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
/* message_addr is a user-space pointer; words are fetched with get_user() */
2086 message_ptr = (unsigned long *) command_args.message_addr;
2088 /* set the base address of the SRAM */
2089 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2091 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
/* NOTE(review): get_user() return value is ignored here — a fault leaves
   message_word stale/uninitialized; flag for follow-up */
2092 get_user(message_word, message_ptr);
2093 /* write data to SRAM */
2094 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2095 edbg("SEP Driver:message_word is %lu\n", message_word);
2096 /* wait for write complete */
2097 sep_wait_sram_write(sep);
2099 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* signal the SEP that the init message is in SRAM */
2101 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2104 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2105 while (!(reg_val & 0xFFFFFFFD));
2107 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2109 /* check the value */
/* GPR3 == 0x1 indicates the firmware reported an init failure */
2110 if (reg_val == 0x1) {
2111 edbg("SEP Driver:init failed\n");
/* 0x8060 — raw register offset of the SEP software monitor (per this code) */
2113 error = sep_read_reg(sep, 0x8060);
2114 edbg("SEP Driver:sw monitor is %lu\n", error);
2116 /* fatal error - read erro status from GPRO */
2117 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2118 edbg("SEP Driver:error is %lu\n", error);
2121 dbg("SEP Driver:<-------- sep_init_handler end\n");
2127 this function handles the request cache and resident reallocation
/*
 * Copies the firmware cache and resident images to their final locations
 * (via sep_copy_cache_resident_to_area) and reports the resulting physical
 * addresses back to user space. new_base_addr is set to the lowest of the
 * shared-area bus address, the resident address and the cache address.
 * NOTE(review): the parameter list is split across a missing line (2130);
 * the second parameter is presumably 'unsigned long arg' — confirm.
 */
2129 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2133 unsigned long phys_cache_address;
2134 unsigned long phys_resident_address;
2135 struct sep_driver_realloc_cache_resident_t command_args;
2138 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2142 /* copy cache and resident to the their intended locations */
2143 error = sep_copy_cache_resident_to_area(sep, command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
/* start from the shared-area base, then lower it to the minimum below */
2147 command_args.new_base_addr = sep->shared_area_bus;
2149 /* find the new base address according to the lowest address between
2150 cache, resident and shared area */
2151 if (phys_resident_address < command_args.new_base_addr)
2152 command_args.new_base_addr = phys_resident_address;
2153 if (phys_cache_address < command_args.new_base_addr)
2154 command_args.new_base_addr = phys_cache_address;
2156 /* set the return parameters */
2157 command_args.new_cache_addr = phys_cache_address;
2158 command_args.new_resident_addr = phys_resident_address;
2160 /* set the new shared area */
2161 command_args.new_shared_area_addr = sep->shared_area_bus;
2163 edbg("SEP Driver:command_args.new_shared_area is %08lx\n", command_args.new_shared_area_addr);
2164 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2165 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2166 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2168 /* return to user */
2169 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2175 this function handles the request for get time
/*
 * Stamps the current time into the shared area via sep_set_time() and
 * copies the resulting physical address and value back to user space.
 * Error handling between the two calls is not visible in this excerpt.
 */
2177 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2180 struct sep_driver_get_time_t command_args;
2182 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2184 error = copy_to_user((void __user *)arg,
2185 &command_args, sizeof(struct sep_driver_get_time_t));
2191 This API handles the end transaction request
/*
 * Ends a SEP transaction. The interrupt-mode teardown (mask IMR, free the
 * IRQ) is compiled out with '#if 0'; only the mutex release is active.
 * NOTE(review): the comment at 2204 says "lock" but the call is
 * mutex_unlock() — the comment is wrong, the unlock matches end-of-
 * transaction semantics.
 */
2193 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2195 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2197 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* mask all interrupt sources except bit 15 */
2199 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2201 /* release IRQ line */
2202 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2204 /* lock the sep mutex */
2205 mutex_unlock(&sep_mutex);
2208 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2215 This function handler the set flow id command
/*
 * Assigns a caller-chosen flow id to the flow context that was most
 * recently created (the one still carrying SEP_TEMP_FLOW_ID). Error branch
 * bodies (get_user failure, context not found) are not visible in this
 * excerpt.
 */
2217 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2220 unsigned long flow_id;
2221 struct sep_flow_context_t *flow_data_ptr;
2223 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
/* read just the flow_id field directly from the user struct */
2225 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2229 /* find the flow data structure that was just used for creating new flow
2230 - its id should be default */
2231 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2232 if (flow_data_ptr == NULL)
2236 flow_data_ptr->flow_id = flow_id;
2239 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
/*
 * Main ioctl dispatcher for the SEP character device: validates the magic
 * number, then routes each SEP_IOC* command to its handler.
 * NOTE(review): the 'break;' line after each case and the switch/default
 * scaffolding are among the lines missing from this excerpt (the embedded
 * numbering skips them, e.g. 2265, 2269); the visible structure matches a
 * conventional one-handler-per-case switch.
 */
2247 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2250 struct sep_device *sep = filp->private_data;
2252 dbg("------------>SEP Driver: ioctl start\n");
2254 edbg("SEP Driver: cmd is %x\n", cmd);
2256 /* check that the command is for sep device */
2257 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2261 case SEP_IOCSENDSEPCOMMAND:
2262 /* send command to SEP */
2263 sep_send_command_handler(sep);
2264 edbg("SEP Driver: after sep_send_command_handler\n");
2266 case SEP_IOCSENDSEPRPLYCOMMAND:
2267 /* send reply command to SEP */
2268 sep_send_reply_command_handler(sep);
2270 case SEP_IOCALLOCDATAPOLL:
2271 /* allocate data pool */
2272 error = sep_allocate_data_pool_memory_handler(sep, arg);
2274 case SEP_IOCWRITEDATAPOLL:
2275 /* write data into memory pool */
2276 error = sep_write_into_data_pool_handler(sep, arg);
2278 case SEP_IOCREADDATAPOLL:
2279 /* read data from data pool into application memory */
2280 error = sep_read_from_data_pool_handler(sep, arg);
2282 case SEP_IOCCREATESYMDMATABLE:
2283 /* create dma table for synhronic operation */
2284 error = sep_create_sync_dma_tables_handler(sep, arg);
2286 case SEP_IOCCREATEFLOWDMATABLE:
2287 /* create flow dma tables */
2288 error = sep_create_flow_dma_tables_handler(sep, arg);
2290 case SEP_IOCFREEDMATABLEDATA:
2291 /* free the pages */
2292 error = sep_free_dma_table_data_handler(sep);
2294 case SEP_IOCSETFLOWID:
2296 error = sep_set_flow_id_handler(sep, arg);
2298 case SEP_IOCADDFLOWTABLE:
2299 /* add tables to the dynamic flow */
2300 error = sep_add_flow_tables_handler(sep, arg);
2302 case SEP_IOCADDFLOWMESSAGE:
2303 /* add message of add tables to flow */
2304 error = sep_add_flow_tables_message_handler(sep, arg);
2306 case SEP_IOCSEPSTART:
2307 /* start command to sep */
2308 error = sep_start_handler(sep);
2310 case SEP_IOCSEPINIT:
2311 /* init command to sep */
2312 error = sep_init_handler(sep, arg);
2314 case SEP_IOCGETSTATICPOOLADDR:
2315 /* get the physical and virtual addresses of the static pool */
2316 error = sep_get_static_pool_addr_handler(sep, arg);
2318 case SEP_IOCENDTRANSACTION:
2319 error = sep_end_transaction_handler(sep, arg);
2321 case SEP_IOCREALLOCCACHERES:
2322 error = sep_realloc_cache_resident_handler(sep, arg);
2324 case SEP_IOCGETMAPPEDADDROFFSET:
2325 error = sep_get_physical_mapped_offset_handler(sep, arg);
/* presumably the SEP_IOCGETIME case — its 'case' line is missing here */
2328 error = sep_get_time_handler(sep, arg);
2334 dbg("SEP Driver:<-------- ioctl end\n");
2340 #if !SEP_DRIVER_POLLING_MODE
2342 /* handler for flow done interrupt */
/*
 * Workqueue callback run when a flow-done interrupt fires: frees the
 * in-process input (and, if present, output) tables, and if more input
 * tables are pending, copies the flow message and signals the SEP via GPR2.
 * NOTE(review): 'sep' used at 2365/2367 is not a parameter of this function
 * — presumably a file-scope device pointer; confirm against the full file.
 * NOTE(review): the comment at 2364 says "copy the message to the shared
 * RAM", but the memcpy arguments copy FROM sep->shared_area INTO
 * flow_data_ptr->message — comment and code disagree; verify intent.
 */
2344 static void sep_flow_done_handler(struct work_struct *work)
2346 struct sep_flow_context_t *flow_data_ptr;
2348 /* obtain the mutex */
2349 mutex_lock(&sep_mutex);
2351 /* get the pointer to context */
/* relies on flow_wq being the first member of sep_flow_context_t */
2352 flow_data_ptr = (struct sep_flow_context_t *) work;
2354 /* free all the current input tables in sep */
2355 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2357 /* free all the current tables output tables in SEP (if needed) */
/* 0xffffffff is used as the "no table" sentinel address */
2358 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2359 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2361 /* check if we have additional tables to be sent to SEP only input
2362 flag may be checked */
2363 if (flow_data_ptr->input_tables_flag) {
2364 /* copy the message to the shared RAM and signal SEP */
2365 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_area, flow_data_ptr->message_size_in_bytes);
2367 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2369 mutex_unlock(&sep_mutex);
2372 interrupt handler function
/*
 * Shared IRQ handler. Reads IRR to identify the source; the flow-done path
 * (bit 11) is deliberately compiled out via 'if (0 ...)' — only the reply
 * interrupt (bit 13) is live, which wakes sep_event. All serviced bits are
 * acknowledged by writing reg_val back to ICR.
 * NOTE(review): the comment at 2390 says "read GPRO" but the code at 2391
 * reads HW_HOST_IRR_REG_ADDR again — comment/code mismatch in the (dead)
 * flow branch.
 */
2374 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2376 irqreturn_t int_error;
2377 unsigned long reg_val;
2378 unsigned long flow_id;
2379 struct sep_flow_context_t *flow_context_ptr;
2380 struct sep_device *sep = dev_id;
2382 int_error = IRQ_HANDLED;
2384 /* read the IRR register to check if this is SEP interrupt */
2385 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2386 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2388 /* check if this is the flow interrupt */
/* flow-interrupt handling intentionally disabled (condition is constant 0) */
2389 if (0 /*reg_val & (0x1 << 11) */ ) {
2390 /* read GPRO to find out the which flow is done */
2391 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2393 /* find the contex of the flow */
/* flow id is carried in the top 4 bits of the register value */
2394 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2395 if (flow_context_ptr == NULL)
2396 goto end_function_with_error;
2398 /* queue the work */
2399 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2400 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2403 /* check if this is reply interrupt from SEP */
2404 if (reg_val & (0x1 << 13)) {
2405 /* update the counter of reply messages */
2407 /* wake up the waiting process */
2408 wake_up(&sep_event);
/* neither flow nor reply bit set — not our interrupt on this shared line */
2410 int_error = IRQ_NONE;
2414 end_function_with_error:
2415 /* clear the interrupt */
2416 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
/*
 * Waits for the SEP busy register to clear.
 * NOTE(review): the body is almost entirely missing from this excerpt
 * (lines 2428-2431 and 2433+ absent) — only the register read is visible;
 * the surrounding poll loop must be confirmed against the full source.
 */
2427 static void sep_wait_busy(struct sep_device *sep)
2432 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2437 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * Workaround: requests register access from the SEP via GPR0, sets the AHB
 * read/write burst register to single-burst (0), then releases the SEP by
 * writing GPR0 back to 0. The wait-for-grant loop between the request and
 * the burst write is not visible in this excerpt (lines 2449-2451 missing).
 */
2439 static void sep_configure_dma_burst(struct sep_device *sep)
/* raw offset of the AHB burst-configuration register (local to this patch) */
2441 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2443 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2445 /* request access to registers from SEP */
2446 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2448 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2452 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2454 /* set the DMA burst register to single burst */
2455 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2457 /* release the sep busy */
2458 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2461 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2468 Function that is activaed on the succesful probe of the SEP device
/*
 * PCI probe: enables the device, allocates and maps the shared area, maps
 * the device's BAR-0 register space, allocates the RAR region, and (in
 * interrupt mode) installs the IRQ handler. Cleanup on failure is via
 * goto labels in reverse acquisition order.
 * NOTE(review): many lines are missing from this excerpt (declarations of
 * error/counter/size paths, the success 'goto end_function' after
 * request_irq, and the final return). Only one SEP instance is supported
 * (sep_dev singleton check).
 */
2470 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2473 struct sep_device *sep;
2475 int size; /* size of memory for allocation */
2477 edbg("Sep pci probe starting\n");
/* singleton guard: only one SEP device instance is supported */
2478 if (sep_dev != NULL) {
2479 dev_warn(&pdev->dev, "only one SEP supported.\n");
2483 /* enable the device */
2484 error = pci_enable_device(pdev);
2486 edbg("error enabling pci device\n");
2490 /* set the pci dev pointer */
2491 sep_dev = &sep_instance;
2492 sep = &sep_instance;
2494 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2495 /* transaction counter that coordinates the transactions between SEP
2498 /* counter for the messages from sep */
2500 /* counter for the number of bytes allocated in the pool
2501 for the current transaction */
2502 sep->data_pool_bytes_allocated = 0;
2504 /* calculate the total size for allocation */
/* shared area = message + sync DMA tables + data pool + flow DMA tables
   + static area + system data, all carved from one allocation */
2505 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2506 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2508 /* allocate the shared area */
2509 if (sep_map_and_alloc_shared_area(sep, size)) {
2511 /* allocation failed */
2512 goto end_function_error;
2514 /* now set the memory regions */
/* the message region starts at the base of the shared area */
2515 sep->message_shared_area_addr = sep->shared_area;
2517 edbg("SEP Driver: sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
2519 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2520 /* send the new SHARED MESSAGE AREA to the SEP */
2521 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2523 /* poll for SEP response */
2524 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
/* SEP echoes the address back in GPR1 when accepted; 0xffffffff = error */
2525 while (retVal != 0xffffffff && retVal != sep->shared_area_bus)
2526 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2528 /* check the return value (register) */
2529 if (retVal != sep->shared_area_bus) {
2531 goto end_function_deallocate_sep_shared_area;
2534 /* init the flow contextes */
2535 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2536 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2538 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2539 if (sep->flow_wq == NULL) {
2541 edbg("sep_driver:flow queue creation failed\n");
2542 goto end_function_deallocate_sep_shared_area;
2544 edbg("SEP Driver: create flow workqueue \n");
2545 /* load the rom code */
2546 sep_load_rom_code(sep);
/* hold a reference on the PCI device for the driver's lifetime */
2548 sep->pdev = pci_dev_get(pdev);
2550 /* get the io memory start address */
2551 sep->io_bus = pci_resource_start(pdev, 0);
2553 edbg("SEP Driver error pci resource start\n");
2554 goto end_function_deallocate_sep_shared_area;
2557 /* get the io memory end address */
2558 sep->io_end_bus = pci_resource_end(pdev, 0);
2559 if (!sep->io_end_bus) {
2560 edbg("SEP Driver error pci resource end\n");
2561 goto end_function_deallocate_sep_shared_area;
2564 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2566 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2568 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep->io_end_bus);
2570 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
2572 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2573 if (!sep->io_addr) {
2574 edbg("SEP Driver error ioremap of io memory\n");
2575 goto end_function_deallocate_sep_shared_area;
2578 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2580 sep->reg_addr = (void __iomem *) sep->io_addr;
2582 /* set up system base address and shared memory location */
2584 sep->rar_addr = kmalloc(2 * SEP_RAR_IO_MEM_REGION_SIZE, GFP_KERNEL);
2586 if (!sep->rar_addr) {
2587 edbg("SEP Driver:cant kmalloc rar\n");
2588 goto end_function_uniomap;
/* NOTE(review): __pa() on a kmalloc pointer gives the physical address but
   not necessarily a DMA-safe bus address; flag for DMA-API follow-up */
2591 sep->rar_bus = __pa(sep->rar_addr);
2593 edbg("SEP Driver:rar_physical is %08llx\n", (unsigned long long)sep->rar_bus);
2594 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2596 #if !SEP_DRIVER_POLLING_MODE
2598 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2600 /* clear ICR register */
2601 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2603 /* set the IMR register - open only GPR 2 */
2604 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2606 edbg("SEP Driver: about to call request_irq\n");
2607 /* get the interrupt line */
2608 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2610 goto end_function_free_res;
2613 edbg("SEP Driver: about to write IMR REG_ADDR");
2615 /* set the IMR register - open only GPR 2 */
2616 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* error unwinding — labels in reverse order of acquisition */
2618 end_function_free_res:
2619 kfree(sep->rar_addr);
2620 #endif /* SEP_DRIVER_POLLING_MODE */
2621 end_function_uniomap:
2622 iounmap(sep->io_addr);
2623 end_function_deallocate_sep_shared_area:
2624 /* de-allocate shared area */
2625 sep_unmap_and_free_shared_area(sep, size);
/* PCI match table: Intel device 0x080c (the SEP security processor) */
2632 static struct pci_device_id sep_pci_id_tbl[] = {
2633 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2637 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2639 /* field for registering driver to PCI device */
/* NOTE(review): the .probe member is among the lines missing from this
   excerpt; only .name and .id_table are visible */
2640 static struct pci_driver sep_pci_driver = {
2641 .name = "sep_sec_driver",
2642 .id_table = sep_pci_id_tbl,
2644 /* FIXME: remove handler */
2647 /* major and minor device numbers */
2648 static dev_t sep_devno;
2650 /* the files operations structure of the driver */
/* .ioctl/.poll/.open/.mmap entries are missing from this excerpt */
2651 static struct file_operations sep_file_operations = {
2652 .owner = THIS_MODULE,
2656 .release = sep_release,
2661 /* cdev struct of the driver */
2662 static struct cdev sep_cdev;
2665 this function registers the driver to the file system
/*
 * Allocates one char-device number ("sep_sec_driver"), initializes the
 * cdev with sep_file_operations and adds it to the system. On cdev_add
 * failure the device number region is released again. Success-path return
 * lines are missing from this excerpt.
 */
2667 static int sep_register_driver_to_fs(void)
2669 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2671 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2676 cdev_init(&sep_cdev, &sep_file_operations);
2677 sep_cdev.owner = THIS_MODULE;
2679 /* register the driver with the kernel */
2680 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2683 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2684 goto end_function_unregister_devnum;
2689 end_function_unregister_devnum:
2691 /* unregister dev numbers */
2692 unregister_chrdev_region(sep_devno, 1);
2699 /*--------------------------------------------------------------
2701 ----------------------------------------------------------------*/
/*
 * Module init: registers the PCI driver first, then the char-device
 * interface; unwinds in reverse order on failure.
 * NOTE(review): per the visible label layout, a failed
 * sep_register_driver_to_fs() jumps to end_function_unregister_pci, which
 * falls through into end_function_unregister_from_fs (cdev_del on a never-
 * added cdev) — the missing lines around 2715 should be checked before
 * concluding this is intentional fall-through.
 */
2702 static int __init sep_init(void)
2705 dbg("SEP Driver:-------->Init start\n");
2706 /* FIXME: Probe can occur before we are ready to survive a probe */
2707 ret_val = pci_register_driver(&sep_pci_driver);
2709 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2710 goto end_function_unregister_from_fs;
2712 /* register driver to fs */
2713 ret_val = sep_register_driver_to_fs();
2715 goto end_function_unregister_pci;
2717 end_function_unregister_pci:
2718 pci_unregister_driver(&sep_pci_driver);
2719 end_function_unregister_from_fs:
2720 /* unregister from fs */
2721 cdev_del(&sep_cdev);
2722 /* unregister dev numbers */
2723 unregister_chrdev_region(sep_devno, 1);
2725 dbg("SEP Driver:<-------- Init end\n");
2730 /*-------------------------------------------------------------
2732 --------------------------------------------------------------*/
/*
 * Module exit: tears down the char-device interface, frees the shared
 * area (size recomputed with the same sum used in sep_probe) and unmaps
 * the register space. The FIXME notes this cleanup belongs in the PCI
 * remove path, not module exit. pci_unregister_driver is not visible in
 * this excerpt — presumably on one of the missing lines; confirm.
 */
2733 static void __exit sep_exit(void)
2737 dbg("SEP Driver:--------> Exit start\n");
2739 /* unregister from fs */
2740 cdev_del(&sep_cdev);
2741 /* unregister dev numbers */
2742 unregister_chrdev_region(sep_devno, 1);
2743 /* calculate the total size for de-allocation */
2744 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2745 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2746 /* FIXME: We need to do this in the unload for the device */
2747 /* free shared area */
2749 sep_unmap_and_free_shared_area(sep_dev, size);
2750 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2751 iounmap((void *) sep_dev->reg_addr);
2752 edbg("SEP Driver: iounmap \n");
2754 edbg("SEP Driver: release_mem_region \n");
2755 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit registration and license declaration */
2759 module_init(sep_init);
2760 module_exit(sep_exit);
2762 MODULE_LICENSE("GPL");