3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/sched.h>
40 #include <linux/poll.h>
41 #include <linux/wait.h>
42 #include <linux/pci.h>
43 #include <linux/firmware.h>
44 #include <linux/slab.h>
45 #include <asm/ioctl.h>
46 #include <linux/ioport.h>
48 #include <linux/interrupt.h>
49 #include <linux/pagemap.h>
50 #include <asm/cacheflush.h>
51 #include "sep_driver_hw_defs.h"
52 #include "sep_driver_config.h"
53 #include "sep_driver_api.h"
/* Build-time configuration: in ARM debug mode the driver embeds a copy of
   the SEP boot ROM (CRYS_SEP_ROM) plus the board addresses used to load it. */
56 #if SEP_DRIVER_ARM_DEBUG_MODE
/* byte size of the embedded SEP ROM image */
58 #define CRYS_SEP_ROM_length 0x4000
/* ROM load address in SEP address space, and its offset within the register BAR */
59 #define CRYS_SEP_ROM_start_address 0x8000C000UL
60 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
/* bank-select register that pages the ROM window (absolute address / BAR offset) */
61 #define SEP_ROM_BANK_register 0x80008420UL
62 #define SEP_ROM_BANK_register_offset 0x8420UL
/* RAR region base address used in ARM debug mode */
63 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
66 * THESE 2 definitions are specific to the board - must be
67 * defined during integration
/* NOTE(review): second definition of SEP_RAR_IO_MEM_REGION_START_ADDRESS -
   presumably selected by an #else branch not visible in this view; confirm. */
69 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * sep_load_rom_code - push the embedded ROM image into the SEP (debug builds)
 * @sep: device to load
 *
 * Writes the on-host CRYS_SEP_ROM image into the device one 32-bit word at a
 * time through four selectable ROM banks, releases the SEP from reset, then
 * reads the boot status from GPR3 and reports it (see the case comments).
 */
73 static void sep_load_rom_code(struct sep_device *sep)
76 	unsigned long i, k, j;
81 	/* Loading ROM from SEP_ROM_image.h file */
82 	k = sizeof(CRYS_SEP_ROM);
84 	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
86 	edbg("SEP Driver: k is %lu\n", k);
87 	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	/* NOTE(review): %p applied to an integer constant - mismatched format */
88 	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
	/* copy the image: select bank i, then write CRYS_SEP_ROM_length/4
	   words into the ROM window; each bank covers 0x1000 words */
90 	for (i = 0; i < 4; i++) {
92 		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
94 		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
95 			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
100 	j = CRYS_SEP_ROM_length;
	/* release the SEP from reset so the freshly loaded ROM starts running */
107 	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
109 	/* poll for SEP ROM boot finish */
111 	reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
114 	edbg("SEP Driver: ROM polling ended\n");
118 		/* fatal error - read error status from GPR0 */
119 		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
120 		edbg("SEP Driver: ROM polling case 1\n");
123 		/* Cold boot ended successfully */
125 		/* Warmboot ended successfully */
127 		/* ColdWarm boot ended successfully */
130 		/* Boot First Phase ended */
		/* boot not complete: GPR0 carries a warning status in this case */
131 		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
133 		edbg("SEP Driver: ROM polling case %d\n", reg);
/* Outside ARM debug mode there is no host-side ROM image to load: stub */
140 static void sep_load_rom_code(struct sep_device *sep) { }
141 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
145 /*----------------------------------------
147 -----------------------------------------*/
/* fixed system base address and the size of the RAR I/O memory region */
149 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
150 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
152 /*--------------------------------------------
154 --------------------------------------------*/
156 /* debug messages level */
/* module parameter gating the edbg()/dbg() diagnostics below */
158 module_param(debug, int , 0);
159 MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
161 /* Keep this a single static object for now to keep the conversion easy */
/* the single SEP device instance and the pointer used by the file ops */
163 static struct sep_device sep_instance;
164 static struct sep_device *sep_dev = &sep_instance;
167 	mutex for the access to the internals of the sep driver
169 static DEFINE_MUTEX(sep_mutex);
172 /* wait queue head (event) of the driver */
/* slept on by blocking open (sep_open) and registered by sep_poll */
173 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
176 * sep_load_firmware - copy firmware cache/resident
177 * @sep: device we are loading
179 * This function copies the cache and resident from their source
180 * location into destination shared memory.
183 static int sep_load_firmware(struct sep_device *sep)
185 	const struct firmware *fw;
186 	char *cache_name = "sep/cache.image.bin";
187 	char *res_name = "sep/resident.image.bin";
190 	edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
191 	edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
	/* cache image goes at the very start of the RAR region */
194 	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
196 		edbg("SEP Driver:cant request cache fw\n");
	/* NOTE(review): "%08Zx" is nonstandard - the C99 size_t length
	   modifier is lowercase "%zx"; confirm edbg()'s format handling */
199 	edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
201 	memcpy(sep->rar_addr, (void *)fw->data, fw->size);
202 	sep->cache_size = fw->size;
203 	release_firmware(fw);
	/* resident image is placed immediately after the cache in the RAR */
205 	sep->resident_bus = sep->rar_bus + sep->cache_size;
206 	sep->resident_addr = sep->rar_addr + sep->cache_size;
209 	error = request_firmware(&fw, res_name, &sep->pdev->dev);
211 		edbg("SEP Driver:cant request res fw\n");
214 	edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
216 	memcpy(sep->resident_addr, (void *) fw->data, fw->size);
217 	sep->resident_size = fw->size;
218 	release_firmware(fw);
220 	edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
221 	sep->resident_addr, (unsigned long long)sep->resident_bus,
222 	sep->rar_addr, (unsigned long long)sep->rar_bus);
/* advertise the firmware files fetched by sep_load_firmware() */
226 MODULE_FIRMWARE("sep/cache.image.bin");
227 MODULE_FIRMWARE("sep/resident.image.bin");
230 * sep_map_and_alloc_shared_area - allocate shared block
231 * @sep: security processor
232 * @size: size of shared area
234 * Allocate a shared buffer in host memory that can be used by both the
235 * kernel and also the hardware interface via DMA.
238 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
241 	/* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
	/* coherent DMA buffer: CPU virtual address in shared_addr, device
	   bus address returned through shared_bus */
242 	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
243 	&sep->shared_bus, GFP_KERNEL);
245 	if (!sep->shared_addr) {
246 		edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
249 	/* set the bus address of the shared area */
	/* NOTE(review): %ld assumes a long-typed size - the parameter's
	   declared type is not visible here; confirm the format matches */
250 	edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
251 	size, sep->shared_addr, (unsigned long long)sep->shared_bus);
256 * sep_unmap_and_free_shared_area - free shared block
257 * @sep: security processor
259 * Free the shared area allocated to the security processor. The
260 * processor must have finished with this and any final posted
261 * writes cleared before we do so.
263 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
	/* release the coherent buffer obtained in sep_map_and_alloc_shared_area */
265 	dma_free_coherent(&sep->pdev->dev, size,
266 	sep->shared_addr, sep->shared_bus);
270 * sep_shared_virt_to_bus - convert bus/virt addresses
272 * Returns the bus address inside the shared area according
273 * to the virtual address.
276 static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
	/* bus address = shared area base (bus) + offset of virt within it;
	   the caller must pass a pointer inside the shared area */
279 	dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
280 	edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
286 * sep_shared_bus_to_virt - convert bus/virt addresses
288 * Returns virtual address inside the shared area according
289 * to the bus address.
292 static void *sep_shared_bus_to_virt(struct sep_device *sep,
293 	dma_addr_t bus_address)
	/* inverse of sep_shared_virt_to_bus: same offset, virtual base */
295 	return sep->shared_addr + (bus_address - sep->shared_bus);
300 * sep_try_open - attempt to open a SEP device
301 * @sep: device to attempt to open
303 * Atomically attempt to get ownership of a SEP device.
304 * Returns 1 if the device was opened, 0 on failure.
307 static int sep_try_open(struct sep_device *sep)
	/* bit 0 of in_use is the ownership flag; test_and_set_bit makes
	   acquisition atomic against concurrent opens */
309 	if (!test_and_set_bit(0, &sep->in_use))
315 * sep_open - device open method
316 * @inode: inode of sep device
317 * @filp: file handle to sep device
319 * Open method for the SEP device. Called when userspace opens
320 * the SEP device node. Must also release the memory data pool
323 * Returns zero on success otherwise an error code.
326 static int sep_open(struct inode *inode, struct file *filp)
331 	/* check the blocking mode */
	/* non-blocking open: fail immediately if the device is busy */
332 	if (filp->f_flags & O_NDELAY) {
333 		if (sep_try_open(sep_dev) == 0)
	/* blocking open: sleep on sep_event until ownership is acquired,
	   bailing out if interrupted by a signal */
336 	if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
339 	/* Bind to the device, we only have one which makes it easy */
340 	filp->private_data = sep_dev;
341 	/* release data pool allocations */
342 	sep_dev->data_pool_bytes_allocated = 0;
348 * sep_release - close a SEP device
349 * @inode: inode of SEP device
350 * @filp: file handle being closed
352 * Called on the final close of a SEP device. As the open protects against
353 * multiple simultaneous opens that means this method is called when the
354 * final reference to the open handle is dropped.
357 static int sep_release(struct inode *inode, struct file *filp)
359 	struct sep_device *sep = filp->private_data;
	/* interrupt-mode teardown is compiled out; kept for reference */
360 #if 0 /*!SEP_DRIVER_POLLING_MODE */
	/* mask all SEP interrupts before dropping the IRQ */
362 	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
363 	/* release IRQ line */
364 	free_irq(SEP_DIRVER_IRQ_NUM, sep);
367 	/* Ensure any blocked open progresses */
	/* drop ownership taken in sep_try_open; a wake of sep_event lets a
	   blocked sep_open retry (wake-up call not visible in this view) */
368 	clear_bit(0, &sep->in_use);
373 /*---------------------------------------------------------------
374 map function - this function maps the message shared area
375 -----------------------------------------------------------------*/
376 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
379 	struct sep_device *sep = filp->private_data;
381 	dbg("-------->SEP Driver: mmap start\n");
383 	/* check that the size of the mapped range is as the size of the message
	/* reject mappings larger than the message shared area */
385 	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
386 		edbg("SEP Driver mmap requested size is more than allowed\n");
387 		printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
388 		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
		/* NOTE(review): message says vm_end but prints vm_start -
		   misleading log text (left unchanged: runtime string) */
389 		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
393 	edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
395 	/* get bus address */
396 	bus_addr = sep->shared_bus;
398 	edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
	/* map the whole requested range onto the shared area's page frames */
400 	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
401 		edbg("SEP Driver remap_page_range failed\n");
402 		printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
406 	dbg("SEP Driver:<-------- mmap end\n");
412 /*-----------------------------------------------
poll method: in polling-mode builds, busy-waits on GPR2 until the SEP
reply counter catches up with send_ct; then reports readiness based on
whether the shared-area message is a SEP request (writable) or a SEP
reply (readable).
414 *----------------------------------------------*/
415 static unsigned int sep_poll(struct file *filp, poll_table * wait)
418 	unsigned int mask = 0;
419 	unsigned long retval = 0;	/* flow id */
420 	struct sep_device *sep = filp->private_data;
422 	dbg("---------->SEP Driver poll: start\n");
425 #if SEP_DRIVER_POLLING_MODE
	/* busy-poll GPR2 (low 31 bits carry the reply count) until it
	   matches the number of commands we have sent */
427 	while (sep->send_ct != (retval & 0x7FFFFFFF)) {
428 		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
430 		for (count = 0; count < 10 * 4; count += 4)
431 			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
436 	/* add the event to the polling wait table */
437 	poll_wait(filp, &sep_event, wait);
441 	edbg("sep->send_ct is %lu\n", sep->send_ct);
442 	edbg("sep->reply_ct is %lu\n", sep->reply_ct);
444 	/* check if the data is ready */
445 	if (sep->send_ct == sep->reply_ct) {
446 		for (count = 0; count < 12 * 4; count += 4)
447 			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
449 		for (count = 0; count < 10 * 4; count += 4)
450 			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
452 		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
453 		edbg("retval is %lu\n", retval);
454 		/* check if the this is sep reply or request */
456 			edbg("SEP Driver: sep request in\n");
			/* SEP wants data from us: report writable */
458 			mask |= POLLOUT | POLLWRNORM;
460 			edbg("SEP Driver: sep reply in\n");
			/* SEP produced a reply: report readable */
461 			mask |= POLLIN | POLLRDNORM;
464 	dbg("SEP Driver:<-------- poll exit\n");
469 * sep_time_address - address in SEP memory of time
470 * @sep: SEP device we want the address from
472 * Return the address of the two dwords in memory used for time
	/* the time slot lives at a fixed offset inside the shared area */
476 static u32 *sep_time_address(struct sep_device *sep)
478 	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
482 * sep_set_time - set the SEP time
483 * @sep: the SEP we are setting the time for
485 * Calculates time and sets it at the predefined address.
486 * Called with the sep mutex held.
488 static unsigned long sep_set_time(struct sep_device *sep)
491 	u32 *time_addr;	/* address of time as seen by the kernel */
494 	dbg("sep:sep_set_time start\n");
	/* NOTE(review): do_gettimeofday() is a legacy interface (not y2038
	   safe on 32-bit); only the seconds value is actually stored */
496 	do_gettimeofday(&time);
498 	/* set value in the SYSTEM MEMORY offset */
499 	time_addr = sep_time_address(sep);
	/* word 0: marker token, word 1: wall-clock seconds */
501 	time_addr[0] = SEP_TIME_VAL_TOKEN;
502 	time_addr[1] = time.tv_sec;
504 	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
505 	edbg("SEP Driver:time_addr is %p\n", time_addr);
506 	edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
512 * sep_dump_message - dump the message that is pending
515 * Dump out the message pending in the shared message area
518 static void sep_dump_message(struct sep_device *sep)
	/* print the first 12 32-bit words at the start of the shared area */
521 	for (count = 0; count < 12 * 4; count += 4)
522 		edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
526 * sep_send_command_handler - kick off a command
527 * @sep: sep being signalled
529 * This function raises interrupt to SEP that signals that is has a new
530 * command from the host
533 static void sep_send_command_handler(struct sep_device *sep)
535 	dbg("sep:sep_send_command_handler start\n");
	/* serialize against other message-area users */
537 	mutex_lock(&sep_mutex);
540 	/* FIXME: flush cache */
543 	sep_dump_message(sep);
546 	/* send interrupt to SEP */
	/* writing GPR0 interrupts the SEP: "new command available" */
547 	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
548 	dbg("SEP Driver:<-------- sep_send_command_handler end\n");
549 	mutex_unlock(&sep_mutex);
554 * sep_send_reply_command_handler - kick off a command reply
555 * @sep: sep being signalled
557 * This function raises interrupt to SEP that signals that is has a new
558 * command from the host
561 static void sep_send_reply_command_handler(struct sep_device *sep)
563 	dbg("sep:sep_send_reply_command_handler start\n");
568 	sep_dump_message(sep);
	/* counter update and GPR2 write must be atomic w.r.t. other senders */
570 	mutex_lock(&sep_mutex);
571 	sep->send_ct++;	/* update counter */
572 	/* send the interrupt to SEP */
	/* GPR2 carries the running send count that sep_poll compares against */
573 	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
574 	/* update both counters */
577 	mutex_unlock(&sep_mutex);
578 	dbg("sep: sep_send_reply_command_handler end\n");
582 This function handles the allocate data pool memory request
583 This function calculates the bus address of the
584 allocated memory, and the offset of this area from the mapped address.
585 Therefore, the FVOs in user space can calculate the exact virtual
586 address of this allocated memory
588 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
592 	struct sep_driver_alloc_t command_args;
594 	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
	/* fetch the request (num_bytes etc.) from userspace */
596 	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
602 	/* allocate memory */
	/* bump-pointer allocation from the shared data pool; fails when the
	   pool would be exhausted.  NOTE(review): the addition can wrap for
	   huge user-supplied num_bytes - confirm an overflow check exists */
603 	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
608 	/* set the virtual and bus address */
609 	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
610 	command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
612 	/* write the memory back to the user space */
613 	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
619 	/* set the allocation */
	/* commit the allocation only after the reply reached userspace */
620 	sep->data_pool_bytes_allocated += command_args.num_bytes;
623 	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
628 This function handles write into allocated data pool command
630 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
635 	unsigned long app_in_address;
636 	unsigned long num_bytes;
637 	void *data_pool_area_addr;
639 	dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
641 	/* get the application address */
642 	error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
646 	/* get the virtual kernel address address */
647 	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
650 	virt_address = (void *)va;
652 	/* get the number of bytes */
653 	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
657 	/* calculate the start of the data pool */
658 	data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
661 	/* check that the range of the virtual kernel address is correct */
	/* NOTE(review): only the start pointer is range-checked; the check
	   ignores num_bytes, so a copy can still run past the pool end
	   (same issue the FIXME in the read handler calls out) */
662 	if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
666 	/* copy the application data */
667 	error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
671 	dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
676 this function handles the read from data pool command
678 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
681 	/* virtual address of dest application buffer */
682 	unsigned long app_out_address;
683 	/* virtual address of the data pool */
686 	unsigned long num_bytes;
687 	void *data_pool_area_addr;
689 	dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
691 	/* get the application address */
692 	error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
696 	/* get the virtual kernel address address */
697 	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
700 	virt_address = (void *)va;
702 	/* get the number of bytes */
703 	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
707 	/* calculate the start of the data pool */
708 	data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
710 	/* FIXME: These are incomplete all over the driver: what about + len
711 	   and when doing that also overflows */
712 	/* check that the range of the virtual kernel address is correct */
713 	if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
718 	/* copy the application data */
	/* pool -> userspace, the mirror of sep_write_into_data_pool_handler */
719 	error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
723 	dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
728 This function releases all the application virtual buffer physical pages,
729 that were previously locked
731 static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
	/* dirty path: pages that the device may have written must be marked
	   dirty before release so their contents are written back */
736 	for (count = 0; count < num_pages; count++) {
737 		/* the out array was written, therefore the data was changed */
738 		if (!PageReserved(page_array_ptr[count]))
739 			SetPageDirty(page_array_ptr[count]);
740 		page_cache_release(page_array_ptr[count]);
743 	/* free in pages - the data was only read, therefore no update was done
	/* read-only path: release without dirtying */
745 	for (count = 0; count < num_pages; count++)
746 		page_cache_release(page_array_ptr[count]);
	/* the page array itself was kmalloc'ed by the lock routine */
751 	kfree(page_array_ptr);
757 This function locks all the physical pages of the kernel virtual buffer
758 and construct a basic lli array, where each entry holds the physical
759 page address and the size that application data holds in this physical pages
761 static int sep_lock_kernel_pages(struct sep_device *sep,
762 	unsigned long kernel_virt_addr,
763 	unsigned long data_size,
764 	unsigned long *num_pages_ptr,
765 	struct sep_lli_entry_t **lli_array_ptr,
766 	struct page ***page_array_ptr)
769 	/* the page of the end address of the kernel buffer */
770 	unsigned long end_page;
771 	/* the page of the start address of the user space buffer */
772 	unsigned long start_page;
773 	/* the range in pages */
774 	unsigned long num_pages;
775 	struct sep_lli_entry_t *lli_array;
776 	/* next kernel address to map */
777 	unsigned long next_kernel_address;
780 	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
782 	/* set start and end pages and num pages */
783 	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
784 	start_page = kernel_virt_addr >> PAGE_SHIFT;
785 	num_pages = end_page - start_page + 1;
787 	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
788 	edbg("SEP Driver: data_size is %lu\n", data_size);
789 	edbg("SEP Driver: start_page is %lx\n", start_page);
790 	edbg("SEP Driver: end_page is %lx\n", end_page);
791 	edbg("SEP Driver: num_pages is %lu\n", num_pages);
	/* one LLI entry per page; NOTE(review): GFP_ATOMIC looks
	   unnecessary here - confirm whether this path can sleep */
793 	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
795 		edbg("SEP Driver: kmalloc for lli_array failed\n");
800 	/* set the start address of the first page - app data may start not at
801 	   the beginning of the page */
	/* kernel direct-mapped address, so virt_to_phys is valid here */
802 	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
804 	/* check that not all the data is in the first page only */
805 	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
806 		lli_array[0].block_size = data_size;
808 		lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
811 	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
813 	/* advance the address to the start of the next page */
814 	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
816 	/* go from the second page to the prev before last */
	/* middle pages are always whole pages */
817 	for (count = 1; count < (num_pages - 1); count++) {
818 		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819 		lli_array[count].block_size = PAGE_SIZE;
821 		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
822 		next_kernel_address += PAGE_SIZE;
825 	/* if more then 1 pages locked - then update for the last page size needed */
827 	/* update the address of the last page */
828 	lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
830 	/* set the size of the last page */
	/* last entry holds only the tail of the buffer within its page */
831 	lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
833 	if (lli_array[count].block_size == 0) {
834 		dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
835 		dbg("data_size is %lu\n", data_size);
839 	edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
841 	/* set output params */
842 	*lli_array_ptr = lli_array;
843 	*num_pages_ptr = num_pages;
846 	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
851 This function locks all the physical pages of the application virtual buffer
852 and construct a basic lli array, where each entry holds the physical page
853 address and the size that application data holds in this physical pages
855 static int sep_lock_user_pages(struct sep_device *sep,
856 	unsigned long app_virt_addr,
857 	unsigned long data_size,
858 	unsigned long *num_pages_ptr,
859 	struct sep_lli_entry_t **lli_array_ptr,
860 	struct page ***page_array_ptr)
863 	/* the page of the end address of the user space buffer */
864 	unsigned long end_page;
865 	/* the page of the start address of the user space buffer */
866 	unsigned long start_page;
867 	/* the range in pages */
868 	unsigned long num_pages;
869 	struct page **page_array;
870 	struct sep_lli_entry_t *lli_array;
874 	dbg("SEP Driver:--------> sep_lock_user_pages start\n");
876 	/* set start and end pages and num pages */
877 	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
878 	start_page = app_virt_addr >> PAGE_SHIFT;
879 	num_pages = end_page - start_page + 1;
881 	edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
882 	edbg("SEP Driver: data_size is %lu\n", data_size);
883 	edbg("SEP Driver: start_page is %lu\n", start_page);
884 	edbg("SEP Driver: end_page is %lu\n", end_page);
885 	edbg("SEP Driver: num_pages is %lu\n", num_pages);
887 	/* allocate array of pages structure pointers */
888 	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
890 		edbg("SEP Driver: kmalloc for page_array failed\n");
896 	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
898 		edbg("SEP Driver: kmalloc for lli_array failed\n");
901 		goto end_function_with_error1;
904 	/* convert the application virtual address into a set of physical */
	/* pin the user pages (write=1 so the device may write into them);
	   mmap_sem held for read around the walk */
905 	down_read(&current->mm->mmap_sem);
906 	result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
907 	up_read(&current->mm->mmap_sem);
909 	/* check the number of pages locked - if not all then exit with error */
910 	if (result != num_pages) {
911 		dbg("SEP Driver: not all pages locked by get_user_pages\n");
		/* NOTE(review): on partial success the already-pinned
		   'result' pages should still be released - the error path
		   below releases num_pages; verify against the full file */
914 		goto end_function_with_error2;
917 	/* flush the cache */
918 	for (count = 0; count < num_pages; count++)
919 		flush_dcache_page(page_array[count]);
921 	/* set the start address of the first page - app data may start not at
922 	   the beginning of the page */
923 	lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
925 	/* check that not all the data is in the first page only */
926 	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
927 		lli_array[0].block_size = data_size;
929 		lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
932 	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
934 	/* go from the second page to the prev before last */
	/* middle pages contribute a full page each */
935 	for (count = 1; count < (num_pages - 1); count++) {
936 		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
937 		lli_array[count].block_size = PAGE_SIZE;
939 		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
942 	/* if more then 1 pages locked - then update for the last page size needed */
944 	/* update the address of the last page */
945 	lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
947 	/* set the size of the last page */
948 	lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
950 	if (lli_array[count].block_size == 0) {
951 		dbg("app_virt_addr is %08lx\n", app_virt_addr);
952 		dbg("data_size is %lu\n", data_size);
955 	edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
956 	count, lli_array[count].physical_address,
957 	count, lli_array[count].block_size);
960 	/* set output params */
961 	*lli_array_ptr = lli_array;
962 	*num_pages_ptr = num_pages;
963 	*page_array_ptr = page_array;
	/* unwind paths: drop page refs, then the kmalloc'ed arrays */
966 end_function_with_error2:
967 	/* release the cache */
968 	for (count = 0; count < num_pages; count++)
969 		page_cache_release(page_array[count]);
971 end_function_with_error1:
974 	dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
980 this function calculates the size of data that can be inserted into the lli
981 table from this array the condition is that either the table is full
982 (all entries are entered), or there are no more entries in the lli array
984 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
986 	unsigned long table_data_size = 0;
987 	unsigned long counter;
989 	/* calculate the data in the out lli table if till we fill the whole
990 	   table or till the data has ended */
	/* one slot is reserved for the table's trailing info entry, hence
	   the "- 1" bound on the per-table entry count */
991 	for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
992 		table_data_size += lli_in_array_ptr[counter].block_size;
993 	return table_data_size;
997 this function builds one lli table from the lli_array according to
998 the given size of data
1000 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1002 	unsigned long curr_table_data_size;
1003 	/* counter of lli array entry */
1004 	unsigned long array_counter;
1006 	dbg("SEP Driver:--------> sep_build_lli_table start\n");
1008 	/* init current table data size and lli array entry counter */
1009 	curr_table_data_size = 0;
	/* table starts with one entry reserved for the trailing info entry */
1011 	*num_table_entries_ptr = 1;
1013 	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1015 	/* fill the table till table size reaches the needed amount */
1016 	while (curr_table_data_size < table_data_size) {
1017 		/* update the number of entries in table */
1018 		(*num_table_entries_ptr)++;
		/* copy the current array entry into the table */
1020 		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1021 		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1022 		curr_table_data_size += lli_table_ptr->block_size;
1024 		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1025 		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1026 		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1028 		/* check for overflow of the table data */
1029 		if (curr_table_data_size > table_data_size) {
1030 			edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1032 			/* update the size of block in the table */
			/* trim this table entry so the table carries exactly
			   table_data_size bytes ... */
1033 			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1035 			/* update the physical address in the lli array */
			/* ... and split the source array entry: the remainder
			   stays behind for the next table */
1036 			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1038 			/* update the block size left in the lli array */
1039 			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1041 		/* advance to the next entry in the lli_array */
1044 		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1045 		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1047 		/* move to the next entry in table */
1051 	/* set the info entry to default */
	/* sentinel info entry terminating the table */
1052 	lli_table_ptr->physical_address = 0xffffffff;
1053 	lli_table_ptr->block_size = 0;
1055 	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1056 	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1057 	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1059 	/* set the output parameter */
1060 	*num_processed_entries_ptr += array_counter;
1062 	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1063 	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1068 this function goes over the list of the print created tables and
	/* debug helper: walks the chain of LLI tables and dumps every entry;
	   each table's info entry encodes the next table's size (low 24 bits
	   of block_size), its entry count (next 8 bits) and its address */
1071 static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1073 	unsigned long table_count;
1074 	unsigned long entries_count;
1076 	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
	/* 0xffffffff is the chain terminator set by sep_build_lli_table */
1079 	while ((unsigned long) lli_table_ptr != 0xffffffff) {
1080 		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1081 		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1083 		/* print entries of the table (without info entry) */
1084 		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1085 			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1086 			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1089 		/* point to the info entry */
1092 		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1093 		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		/* decode the next-table descriptor from the info entry */
1096 		table_data_size = lli_table_ptr->block_size & 0xffffff;
1097 		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1098 		lli_table_ptr = (struct sep_lli_entry_t *)
1099 		(lli_table_ptr->physical_address);
1101 		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
		/* the stored address is a bus address - map it back to a
		   kernel virtual pointer before dereferencing */
1103 		if ((unsigned long) lli_table_ptr != 0xffffffff)
1104 			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1108 	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
/*
 * sep_prepare_input_dma_table - build the chain of input LLI DMA tables
 * in the shared area for a synchronous symmetric (AES/DES) operation.
 *
 * @sep:                 device context
 * @app_virt_addr:       virtual address of the caller's input buffer
 * @data_size:           size of the input buffer in bytes
 * @block_size:          cipher block size; each table covers a multiple of it
 * @lli_table_ptr:       out - bus address of the first LLI table
 * @num_entries_ptr:     out - number of entries in the first table
 * @table_data_size_ptr: out - data bytes covered by the first table
 * @isKernelVirtualAddress: true if the buffer is a kernel virtual address,
 *                          false if it is a user-space address
 *
 * Fix in this revision: the third argument of the sep_build_lli_table()
 * call had been corrupted by HTML-entity mangling ("&curren;" rendered as
 * the currency sign), turning "&current_entry" into garbage; restored to
 * pass the address of current_entry as the function expects.
 */
1113 This function prepares only input DMA table for synchronous symmetric
1116 static int sep_prepare_input_dma_table(struct sep_device *sep,
1117 unsigned long app_virt_addr,
1118 unsigned long data_size,
1119 unsigned long block_size,
1120 unsigned long *lli_table_ptr,
1121 unsigned long *num_entries_ptr,
1122 unsigned long *table_data_size_ptr,
1123 bool isKernelVirtualAddress)
1125 /* pointer to the info entry of the table - the last entry */
1126 struct sep_lli_entry_t *info_entry_ptr;
1127 /* array of pointers of page */
1128 struct sep_lli_entry_t *lli_array_ptr;
1129 /* points to the first entry to be processed in the lli_in_array */
1130 unsigned long current_entry;
1131 /* num entries in the virtual buffer */
1132 unsigned long sep_lli_entries;
1133 /* lli table pointer */
1134 struct sep_lli_entry_t *in_lli_table_ptr;
1135 /* the total data in one table */
1136 unsigned long table_data_size;
1137 /* number of entries in lli table */
1138 unsigned long num_entries_in_table;
1139 /* next table address */
1140 void *lli_table_alloc_addr;
1141 unsigned long result;
1143 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1145 edbg("SEP Driver:data_size is %lu\n", data_size);
1146 edbg("SEP Driver:block_size is %lu\n", block_size);
1148 /* initialize the pages pointers */
1149 sep->in_page_array = 0;
1150 sep->in_num_pages = 0;
1152 if (data_size == 0) {
1153 /* special case - created 2 entries table with zero data */
1154 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1155 /* FIXME: Should the entry below not be for _bus */
1156 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1157 in_lli_table_ptr->block_size = 0;
1160 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1161 in_lli_table_ptr->block_size = 0;
1163 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1164 *num_entries_ptr = 2;
1165 *table_data_size_ptr = 0;
1170 /* check if the pages are in Kernel Virtual Address layout */
1171 if (isKernelVirtualAddress == true)
1172 /* lock the pages of the kernel buffer and translate them to pages */
1173 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1175 /* lock the pages of the user buffer and translate them to pages */
1176 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1181 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1185 sep_lli_entries = sep->in_num_pages;
1187 /* initiate to point after the message area */
1188 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
/* NOTE(review): current_entry and info_entry_ptr are used below but no
   initialization is visible in this fragment - presumably set to 0 on
   lines lost from this listing; confirm against the full source. */
1190 /* loop till all the entries in in array are not processed */
1191 while (current_entry < sep_lli_entries) {
1192 /* set the new input and output tables */
1193 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1195 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1197 /* calculate the maximum size of data for input table */
1198 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1200 /* now calculate the table size so that it will be modulo block size */
1201 table_data_size = (table_data_size / block_size) * block_size;
1203 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1205 /* construct input lli table; advances current_entry and reports the
   number of entries actually consumed into num_entries_in_table */
1206 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1208 if (info_entry_ptr == 0) {
1209 /* first table built - set the output parameters to physical addresses */
1210 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1211 *num_entries_ptr = num_entries_in_table;
1212 *table_data_size_ptr = table_data_size;
1214 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1216 /* chain: update the info entry of the previous in table */
1217 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1218 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1221 /* save the pointer to the info entry of the current tables */
1222 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1225 /* print input tables */
1226 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1227 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1229 /* the array of the pages */
1230 kfree(lli_array_ptr);
1232 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
/*
 * sep_construct_dma_tables_from_lli - build matched input and output LLI
 * DMA table chains for a symmetric (AES/DES) operation from the page
 * arrays in @lli_in_array / @lli_out_array.  Each pair of tables covers
 * the same amount of data (the smaller of the two candidates, rounded
 * down to a multiple of @block_size).  The chains live in the shared
 * area after the message region; the output parameters receive the bus
 * address, entry count and data size of the first table of each chain.
 *
 * Fix in this revision: the third argument of both sep_build_lli_table()
 * calls had been corrupted by HTML-entity mangling ("&curren;" rendered
 * as the currency sign), destroying "&current_in_entry" and
 * "&current_out_entry"; both restored.
 */
1238 This function creates the input and output dma tables for
1239 symmetric operations (AES/DES) according to the block size from LLI arays
1241 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1242 struct sep_lli_entry_t *lli_in_array,
1243 unsigned long sep_in_lli_entries,
1244 struct sep_lli_entry_t *lli_out_array,
1245 unsigned long sep_out_lli_entries,
1246 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1248 /* points to the area where next lli table can be allocated: keep void *
1249 as there is pointer scaling to fix otherwise */
1250 void *lli_table_alloc_addr;
1251 /* input lli table */
1252 struct sep_lli_entry_t *in_lli_table_ptr;
1253 /* output lli table */
1254 struct sep_lli_entry_t *out_lli_table_ptr;
1255 /* pointer to the info entry of the table - the last entry */
1256 struct sep_lli_entry_t *info_in_entry_ptr;
1257 /* pointer to the info entry of the table - the last entry */
1258 struct sep_lli_entry_t *info_out_entry_ptr;
1259 /* points to the first entry to be processed in the lli_in_array */
1260 unsigned long current_in_entry;
1261 /* points to the first entry to be processed in the lli_out_array */
1262 unsigned long current_out_entry;
1263 /* max size of the input table */
1264 unsigned long in_table_data_size;
1265 /* max size of the output table */
1266 unsigned long out_table_data_size;
1267 /* flag that signifies if this is the first tables built from the arrays */
1268 unsigned long first_table_flag;
1269 /* the data size that should be in table */
1270 unsigned long table_data_size;
1271 /* number of entries in the input table */
1272 unsigned long num_entries_in_table;
1273 /* number of entries in the output table */
1274 unsigned long num_entries_out_table;
1276 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1278 /* initiate to point after the message area */
1279 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1281 current_in_entry = 0;
1282 current_out_entry = 0;
1283 first_table_flag = 1;
1284 info_in_entry_ptr = 0;
1285 info_out_entry_ptr = 0;
1287 /* loop till all the entries in in array are not processed */
1288 while (current_in_entry < sep_in_lli_entries) {
1289 /* set the new input and output tables */
1290 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1292 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1294 /* set the first output tables */
1295 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1297 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1299 /* calculate the maximum size of data for input table */
1300 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1302 /* calculate the maximum size of data for output table */
1303 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1305 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1306 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1308 /* check where the data is smallest */
1309 table_data_size = in_table_data_size;
1310 if (table_data_size > out_table_data_size)
1311 table_data_size = out_table_data_size;
1313 /* now calculate the table size so that it will be modulo block size */
1314 table_data_size = (table_data_size / block_size) * block_size;
1316 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1318 /* construct input lli table; advances current_in_entry */
1319 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1321 /* construct output lli table; advances current_out_entry */
1322 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1324 /* if info entry is null - this is the first table built */
1325 if (info_in_entry_ptr == 0) {
1326 /* set the output parameters to physical addresses */
1327 *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1328 *in_num_entries_ptr = num_entries_in_table;
1329 *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1330 *out_num_entries_ptr = num_entries_out_table;
1331 *table_data_size_ptr = table_data_size;
1333 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1334 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1336 /* chain: update the info entry of the previous in table */
1337 info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1338 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1340 /* chain: update the info entry of the previous out table */
1341 info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1342 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1345 /* save the pointer to the info entry of the current tables */
1346 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1347 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1349 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1350 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1351 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1354 /* print input tables */
1355 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1356 sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1357 /* print output tables */
1358 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1359 sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1360 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1366 This function builds input and output DMA tables for synhronic
1367 symmetric operations (AES, DES). It also checks that each table
1368 is of the modular block size
/*
 * sep_prepare_input_output_dma_table - lock both the input and output
 * buffers (kernel or user virtual, per isKernelVirtualAddress), build
 * LLI arrays for them, and hand off to sep_construct_dma_tables_from_lli()
 * to create the paired DMA table chains.  Uses goto-based cleanup to free
 * the LLI arrays on every path.
 */
1370 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1371 unsigned long app_virt_in_addr,
1372 unsigned long app_virt_out_addr,
1373 unsigned long data_size,
1374 unsigned long block_size,
1375 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1377 /* array of pointers of page */
1378 struct sep_lli_entry_t *lli_in_array;
1379 /* array of pointers of page */
1380 struct sep_lli_entry_t *lli_out_array;
/* NOTE(review): 'result' is used below but its declaration is not visible
   in this fragment - presumably on a line lost from this listing. */
1383 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1385 /* initialize the pages pointers */
1386 sep->in_page_array = 0;
1387 sep->out_page_array = 0;
1389 /* check if the pages are in Kernel Virtual Address layout */
1390 if (isKernelVirtualAddress == true) {
1391 /* lock the pages of the kernel buffer and translate them to pages */
1392 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1394 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1398 /* lock the pages of the user buffer and translate them to pages */
1399 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1401 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* now lock the output buffer the same way; on failure only the input
   LLI array needs freeing, hence end_function_with_error1 */
1406 if (isKernelVirtualAddress == true) {
1407 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1409 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1410 goto end_function_with_error1;
1413 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1415 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1416 goto end_function_with_error1;
1419 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1420 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1421 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1424 /* call the fucntion that creates table from the lli arrays */
1425 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1427 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1428 goto end_function_with_error2;
1431 /* fall through - free the lli entry arrays */
1432 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1433 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1434 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1435 end_function_with_error2:
1436 kfree(lli_out_array);
1437 end_function_with_error1:
1438 kfree(lli_in_array);
1440 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1446 this function handles tha request for creation of the DMA table
1447 for the synchronic symmetric operations (AES,DES)
/*
 * ioctl handler: copies a sep_driver_build_sync_table_t from user space,
 * builds input-only or input+output DMA table chains depending on whether
 * app_out_address is set, then copies the filled-in table addresses and
 * sizes back to the caller.
 */
1449 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1453 /* command arguments */
1454 struct sep_driver_build_sync_table_t command_args;
1456 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1458 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1464 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1465 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1466 edbg("data_size is %lu\n", command_args.data_in_size);
1467 edbg("block_size is %lu\n", command_args.block_size);
1469 /* check if we need to build only input table or input/output */
1470 if (command_args.app_out_address)
1471 /* prepare input and output tables */
1472 error = sep_prepare_input_output_dma_table(sep,
1473 command_args.app_in_address,
1474 command_args.app_out_address,
1475 command_args.data_in_size,
1476 command_args.block_size,
1477 &command_args.in_table_address,
1478 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1480 /* prepare input tables */
1481 error = sep_prepare_input_dma_table(sep,
1482 command_args.app_in_address,
1483 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* return the updated command block (table addresses/sizes) to user space */
1488 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1491 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1496 this function handles the request for freeing dma table for synhronic actions
/*
 * ioctl handler: releases the page arrays locked for the synchronous DMA
 * tables (input always, output only if one was built) and resets the
 * bookkeeping fields in the device context.
 */
1498 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1500 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1502 /* free input pages array */
1503 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1505 /* free output pages array if needed */
1506 if (sep->out_page_array)
1507 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1509 /* reset all the values */
1510 sep->in_page_array = 0;
1511 sep->out_page_array = 0;
1512 sep->in_num_pages = 0;
1513 sep->out_num_pages = 0;
1514 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1519 this function find a space for the new flow dma table
/*
 * Scans the flow-DMA-table region of the shared area for a free slot.
 * A slot is considered taken when its first word (the table id field)
 * has any of bits 0..30 set; scanning advances one fixed-size table at
 * a time.  On success *table_address_ptr points at the free slot.
 */
1521 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1522 unsigned long **table_address_ptr)
1525 /* pointer to the id field of the flow dma table */
1526 unsigned long *start_table_ptr;
1527 /* Do not make start_addr unsigned long * unless fixing the offset
1529 void *flow_dma_area_start_addr;
1530 unsigned long *flow_dma_area_end_addr;
1531 /* maximum table size in words */
1532 unsigned long table_size_in_words;
1534 /* find the start address of the flow DMA table area */
1535 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1537 /* set end address of the flow table area */
1538 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1540 /* set table size in words: max entries plus the two header words
   (page count and page-array pointer) stored before the entries */
1541 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1543 /* set the pointer to the start address of DMA area */
1544 start_table_ptr = flow_dma_area_start_addr;
1546 /* find the space for the next table */
1547 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1548 start_table_ptr += table_size_in_words;
1550 /* check if we reached the end of floa tables area */
1551 if (start_table_ptr >= flow_dma_area_end_addr)
1554 *table_address_ptr = start_table_ptr;
1560 This function creates one DMA table for flow and returns its data,
1561 and pointer to its info entry
/*
 * Builds a single flow DMA table for one virtual buffer: finds a free
 * slot in the flow table area, locks the buffer's pages, stores the
 * page count and page-array pointer in the two header words of the
 * slot, fills one LLI entry per page, and terminates the table with an
 * info entry (physical_address = 0xffffffff).  @table_data encodes the
 * table's physical address plus (entry count << SEP_NUM_ENTRIES_OFFSET_IN_BITS
 * | total byte count); *info_entry_ptr points at the info entry so the
 * caller can chain tables together.
 */
1563 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1564 unsigned long virt_buff_addr,
1565 unsigned long virt_buff_size,
1566 struct sep_lli_entry_t *table_data,
1567 struct sep_lli_entry_t **info_entry_ptr,
1568 struct sep_flow_context_t *flow_data_ptr,
1569 bool isKernelVirtualAddress)
1572 /* the range in pages */
1573 unsigned long lli_array_size;
1574 struct sep_lli_entry_t *lli_array;
1575 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1576 unsigned long *start_dma_table_ptr;
1577 /* total table data counter */
1578 unsigned long dma_table_data_count;
1579 /* pointer that will keep the pointer to the pages of the virtual buffer */
1580 struct page **page_array_ptr;
1581 unsigned long entry_count;
1583 /* find the space for the new table */
1584 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1588 /* check if the pages are in Kernel Virtual Address layout */
1589 if (isKernelVirtualAddress == true)
1590 /* lock kernel buffer in the memory */
1591 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1593 /* lock user buffer in the memory */
1594 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1599 /* set the pointer to page array at the beginning of table - this table is
1600 now considered taken */
1601 *start_dma_table_ptr = lli_array_size;
1603 /* point to the place of the pages pointers of the table */
1604 start_dma_table_ptr++;
1606 /* set the pages pointer */
1607 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1609 /* set the pointer to the first entry */
1610 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1612 /* now create the entries for table */
1613 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1614 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1616 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1618 /* set the total data of a table */
1619 dma_table_data_count += lli_array[entry_count].block_size;
1621 flow_dma_table_entry_ptr++;
1624 /* set the physical address */
1625 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1627 /* set the num_entries and total data size */
1628 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1630 /* set the info entry */
1631 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1632 flow_dma_table_entry_ptr->block_size = 0;
1634 /* set the pointer to info entry */
1635 *info_entry_ptr = flow_dma_table_entry_ptr;
1637 /* the array of the lli entries */
1637 /* the array of the lli entries */
1646 This function creates a list of tables for flow and returns the data for
1647 the first and last tables of the list
/*
 * Builds one flow DMA table per virtual buffer described by the
 * user-supplied (address, size) pairs at @first_buff_addr and chains
 * them: each previous table's info entry is pointed at the next table,
 * with the interrupt flag bit set in its block_size.  On return
 * *first_table_data_ptr / *last_table_data_ptr describe the head and
 * tail of the chain.
 */
1649 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1650 unsigned long num_virtual_buffers,
1651 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1654 unsigned long virt_buff_addr;
1655 unsigned long virt_buff_size;
1656 struct sep_lli_entry_t table_data;
1657 struct sep_lli_entry_t *info_entry_ptr;
1658 struct sep_lli_entry_t *prev_info_entry_ptr;
1663 prev_info_entry_ptr = 0;
1665 /* init the first table to default */
1666 table_data.physical_address = 0xffffffff;
1667 first_table_data_ptr->physical_address = 0xffffffff;
1668 table_data.block_size = 0;
1670 for (i = 0; i < num_virtual_buffers; i++) {
1671 /* get the virtual buffer address */
/* NOTE(review): get_user() is passed &first_buff_addr, the address of a
   kernel local, rather than a user-space pointer derived from it -
   looks suspect; verify against the full source and the get_user()
   contract before relying on this. */
1672 error = get_user(virt_buff_addr, &first_buff_addr);
1676 /* get the virtual buffer size */
1678 error = get_user(virt_buff_size, &first_buff_addr);
1682 /* advance the address to point to the next pair of address|size */
1685 /* now prepare the one flow LLI table from the data */
1686 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1691 /* if this is the first table - save it to return to the user
1693 *first_table_data_ptr = table_data;
1695 /* set the pointer to info entry */
1696 prev_info_entry_ptr = info_entry_ptr;
1698 /* not first table - the previous table info entry should
1700 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1702 /* set the pointer to info entry */
1703 prev_info_entry_ptr = info_entry_ptr;
1707 /* set the last table data */
1708 *last_table_data_ptr = table_data;
1714 this function goes over all the flow tables connected to the given
1715 table and deallocate them
/*
 * Walks the chain of flow DMA tables starting at @first_table_ptr and,
 * for each table, frees the locked pages recorded in the two header
 * words that precede the table's entries (page count at table_ptr - 2,
 * page-array pointer at table_ptr - 1), then follows the info entry to
 * the next table.  0xffffffff in the id word terminates the chain.
 */
1717 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1720 unsigned long *table_ptr;
1721 /* end address of the flow dma area */
1722 unsigned long num_entries;
1723 unsigned long num_pages;
1724 struct page **pages_ptr;
1725 /* maximum table size in words */
1726 struct sep_lli_entry_t *info_entry_ptr;
1728 /* set the pointer to the first table */
1729 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1731 /* set the num of entries */
1732 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1733 & SEP_NUM_ENTRIES_MASK;
1735 /* go over all the connected tables */
1736 while (*table_ptr != 0xffffffff) {
1737 /* get number of pages */
1738 num_pages = *(table_ptr - 2);
1740 /* get the pointer to the pages */
1741 pages_ptr = (struct page **) (*(table_ptr - 1));
1743 /* free the pages */
1744 sep_free_dma_pages(pages_ptr, num_pages, 1);
1746 /* goto to the info entry */
1747 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* advance to the next table in the chain via the info entry */
1749 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1750 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1757 * sep_find_flow_context - find a flow
1758 * @sep: the SEP we are working with
1759 * @flow_id: flow identifier
1761 * Returns a pointer the matching flow, or NULL if the flow does not
1765 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1766 unsigned long flow_id)
1770 * always search for flow with id default first - in case we
1771 * already started working on the flow there can be no situation
1772 * when 2 flows are with default flag
/* NOTE(review): the declaration of 'count' and the NULL-return path are
   not visible in this fragment - presumably on lines lost from this
   listing.  The visible loop simply matches flow_id exactly; the
   "default first" note above is not reflected in the code shown. */
1774 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1775 if (sep->flows[count].flow_id == flow_id)
1776 return &sep->flows[count];
1783 this function handles the request to create the DMA tables for flow
/*
 * ioctl handler: allocates a free flow context (flow_id ==
 * SEP_FREE_FLOW_ID), copies the build request from user space, builds
 * the flow's chained DMA tables, reports the first table's address /
 * entry count / data size back to the caller, and on success tags the
 * context with SEP_TEMP_FLOW_ID and records the first table as the
 * in-process input or output tables.  On any error after table
 * creation the tables are torn down again.
 */
1785 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1788 int error = -ENOENT;
1789 struct sep_driver_build_flow_table_t command_args;
1790 /* first table - output */
1791 struct sep_lli_entry_t first_table_data;
1792 /* dma table data */
1793 struct sep_lli_entry_t last_table_data;
1794 /* pointer to the info entry of the previuos DMA table */
1795 struct sep_lli_entry_t *prev_info_entry_ptr;
1796 /* pointer to the flow data strucutre */
1797 struct sep_flow_context_t *flow_context_ptr;
1799 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1801 /* init variables */
1802 prev_info_entry_ptr = 0;
1803 first_table_data.physical_address = 0xffffffff;
1805 /* find the free structure for flow data */
1807 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1808 if (flow_context_ptr == NULL)
1811 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1817 /* create flow tables */
1818 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1820 goto end_function_with_error;
1822 /* check if flow is static */
1823 if (!command_args.flow_type)
1824 /* point the info entry of the last to the info entry of the first */
1825 last_table_data = first_table_data;
1827 /* set output params */
1828 command_args.first_table_addr = first_table_data.physical_address;
1829 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1830 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1832 /* send the parameters to user application */
1833 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1836 goto end_function_with_error;
1839 /* all the flow created - update the flow entry with temp id */
1840 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1842 /* set the processing tables data in the context */
1843 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1844 flow_context_ptr->input_tables_in_process = first_table_data;
1846 flow_context_ptr->output_tables_in_process = first_table_data;
1850 end_function_with_error:
1851 /* free the allocated tables */
1852 sep_deallocated_flow_tables(&first_table_data);
1854 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1859 this function handles add tables to flow
/*
 * ioctl handler: builds a new chain of DMA tables for an existing flow
 * (looked up by flow_id) and either starts the flow's input/output
 * table list with it or, if a list already exists, splices the new
 * chain onto the end by overwriting the info entry of the current last
 * table.  The first table's address / entry count / data size are
 * copied back to the caller.  On a table-build error the partially
 * built tables are torn down.
 */
1861 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1864 unsigned long num_entries;
1865 struct sep_driver_add_flow_table_t command_args;
1866 struct sep_flow_context_t *flow_context_ptr;
1867 /* first dma table data */
1868 struct sep_lli_entry_t first_table_data;
1869 /* last dma table data */
1870 struct sep_lli_entry_t last_table_data;
1871 /* pointer to the info entry of the current DMA table */
1872 struct sep_lli_entry_t *info_entry_ptr;
1874 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1876 /* get input parameters */
1877 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1883 /* find the flow structure for the flow id */
1884 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1885 if (flow_context_ptr == NULL)
1888 /* prepare the flow dma tables */
1889 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1891 goto end_function_with_error;
1893 /* now check if there is already an existing add table for this flow */
1894 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1895 /* this buffer was for input buffers */
1896 if (flow_context_ptr->input_tables_flag) {
1897 /* add table already exists - add the new tables to the end
/* locate the info entry of the current last input table so the new
   chain can be linked after it */
1899 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1901 info_entry_ptr = (struct sep_lli_entry_t *)
1902 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1904 /* connect to list of tables */
1905 *info_entry_ptr = first_table_data;
1907 /* set the first table data */
1908 first_table_data = flow_context_ptr->first_input_table;
1910 /* set the input flag */
1911 flow_context_ptr->input_tables_flag = 1;
1913 /* set the first table data */
1914 flow_context_ptr->first_input_table = first_table_data;
1916 /* set the last table data */
1917 flow_context_ptr->last_input_table = last_table_data;
1918 } else { /* this is output tables */
1920 /* this buffer was for input buffers */
1921 if (flow_context_ptr->output_tables_flag) {
1922 /* add table already exists - add the new tables to
1923 the end of the previous */
1924 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1926 info_entry_ptr = (struct sep_lli_entry_t *)
1927 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1929 /* connect to list of tables */
1930 *info_entry_ptr = first_table_data;
1932 /* set the first table data */
1933 first_table_data = flow_context_ptr->first_output_table;
1935 /* set the input flag */
1936 flow_context_ptr->output_tables_flag = 1;
1938 /* set the first table data */
1939 flow_context_ptr->first_output_table = first_table_data;
1941 /* set the last table data */
1942 flow_context_ptr->last_output_table = last_table_data;
1945 /* set output params */
1946 command_args.first_table_addr = first_table_data.physical_address;
1947 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1948 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1950 /* send the parameters to user application */
1951 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1954 end_function_with_error:
1955 /* free the allocated tables */
1956 sep_deallocated_flow_tables(&first_table_data);
1958 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1963 this function add the flow add message to the specific flow
/*
 * ioctl handler: copies an add-tables message from user space into the
 * flow context identified by command_args.flow_id, after validating the
 * message size against SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES.
 */
1965 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1968 struct sep_driver_add_message_t command_args;
1969 struct sep_flow_context_t *flow_context_ptr;
1971 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1973 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject oversized messages before touching the flow context */
1980 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1985 /* find the flow context */
1986 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1987 if (flow_context_ptr == NULL)
1990 /* copy the message into context */
1991 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1992 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1996 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
2002 this function returns the bus and virtual addresses of the static pool
/*
 * NOTE(review): elided listing — braces and the return path are missing
 * from view (embedded line numbers jump).  Code kept byte-identical.
 *
 * Reports to user space the bus (DMA) address and the kernel virtual
 * address of the static area inside the shared region, both computed as
 * base + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES.
 */
2004 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
2007 struct sep_driver_static_pool_addr_t command_args;
2009 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2011 /*prepare the output parameters in the struct */
2012 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2013 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2015 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2017 /* send the parameters to user application */
2018 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2021 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2026 this function gets the offset of the physical address from the start
/*
 * NOTE(review): elided listing — the validation branch body after the
 * range check and the return path are missing from view.  Code kept
 * byte-identical.
 *
 * Converts a bus address supplied by user space into an offset relative
 * to the start of the shared area (sep->shared_bus) and copies the
 * result back to user space.
 */
2029 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2032 struct sep_driver_get_mapped_offset_t command_args;
2034 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2036 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
/* reject addresses below the shared area base (error body elided here) */
2042 if (command_args.physical_address < sep->shared_bus) {
2047 /*prepare the output parameters in the struct */
2048 command_args.offset = command_args.physical_address - sep->shared_bus;
2050 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2052 /* send the parameters to user application */
2053 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2057 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * Handles the SEP "start" request: polls GPR3 for a message from the SEP
 * and, on a fatal status, reads the error code from GPR0.
 *
 * NOTE(review): elided listing — the polling loop around the GPR3 read
 * (numbers jump 2074→2077) and the branch that decides between success
 * and the fatal-error read are missing from view.  Code kept byte-identical.
 */
2065 static int sep_start_handler(struct sep_device *sep)
2067 unsigned long reg_val;
2068 unsigned long error = 0;
2070 dbg("SEP Driver:--------> sep_start_handler start\n");
2072 /* wait in polling for message from SEP */
2074 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2077 /* check the value */
2079 /* fatal error - read error status from GPR0 */
2080 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2081 dbg("SEP Driver:<-------- sep_start_handler end\n");
2086 this function handles the request for SEP initialization
/*
 * Downloads an initialization message from user space word-by-word into
 * the SEP's SRAM (via HW_SRAM_ADDR/DATA registers), signals the SEP
 * through GPR0, then polls GPR3 for an acknowledge and decodes failure
 * status.
 *
 * NOTE(review): elided listing — error checks, braces and return paths
 * are missing from view (embedded numbers jump).  Code kept byte-identical.
 */
2088 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2090 unsigned long message_word;
2091 unsigned long *message_ptr;
2092 struct sep_driver_init_t command_args;
2093 unsigned long counter;
2094 unsigned long error;
2095 unsigned long reg_val;
2097 dbg("SEP Driver:--------> sep_init_handler start\n");
2100 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2105 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
2107 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2108 /*sep_configure_dma_burst(); */
2110 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
/* user-space pointer to the init message; read below with get_user()
   (get_user return value is not checked — pre-existing issue) */
2112 message_ptr = (unsigned long *) command_args.message_addr;
2114 /* set the base address of the SRAM */
2115 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2117 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2118 get_user(message_word, message_ptr);
2119 /* write data to SRAM */
2120 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2121 edbg("SEP Driver:message_word is %lu\n", message_word);
2122 /* wait for write complete */
2123 sep_wait_sram_write(sep);
2125 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* signal SEP that the init message is in SRAM */
2127 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
/* NOTE(review): original lines 2128-2129 are elided here; as shown the
   while below has an empty body and never re-reads GPR3, which would
   spin forever if the first read is 0 — presumably the missing lines
   form a do/while that re-reads the register.  Verify against the full
   source before changing. */
2130 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2131 while (!(reg_val & 0xFFFFFFFD));
2133 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2135 /* check the value */
2136 if (reg_val == 0x1) {
2137 edbg("SEP Driver:init failed\n");
/* 0x8060 looks like a raw register offset for a SW monitor — TODO confirm */
2139 error = sep_read_reg(sep, 0x8060);
2140 edbg("SEP Driver:sw monitor is %lu\n", error);
2142 /* fatal error - read error status from GPR0 */
2143 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2144 edbg("SEP Driver:error is %lu\n", error);
2147 dbg("SEP Driver:<-------- sep_init_handler end\n");
2153 this function handles the cache and resident reallocation request
/*
 * Reloads the SEP firmware (cache + resident images) and reports the new
 * base / cache / resident / shared-area bus addresses back to user space.
 *
 * NOTE(review): elided listing — the second parameter of the prototype
 * (presumably 'unsigned long arg'), the error checks after
 * sep_load_firmware(), and the return statements are missing from view.
 * Code kept byte-identical.
 */
2155 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2158 struct sep_driver_realloc_cache_resident_t command_args;
2161 /* copy cache and resident to the their intended locations */
2162 error = sep_load_firmware(sep);
2166 command_args.new_base_addr = sep->shared_bus;
2168 /* find the new base address according to the lowest address between
2169 cache, resident and shared area */
2170 if (sep->resident_bus < command_args.new_base_addr)
2171 command_args.new_base_addr = sep->resident_bus;
2172 if (sep->rar_bus < command_args.new_base_addr)
2173 command_args.new_base_addr = sep->rar_bus;
2175 /* set the return parameters */
2176 command_args.new_cache_addr = sep->rar_bus;
2177 command_args.new_resident_addr = sep->resident_bus;
2179 /* set the new shared area */
2180 command_args.new_shared_area_addr = sep->shared_bus;
2182 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2183 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2184 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2185 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2187 /* return to user */
2188 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2194 * sep_get_time_handler - time request from user space
2195 * @sep: sep we are to set the time for
2196 * @arg: pointer to user space arg buffer
2198 * This function reports back the time and the address in the SEP
2199 * shared buffer at which it has been placed. (Do we really need this!!!)
/*
 * NOTE(review): elided listing — braces and the return statements
 * (including the -EFAULT path after copy_to_user) are missing from view.
 * Time value and its shared-area address are produced under sep_mutex so
 * they stay consistent with concurrent transactions.
 */
2202 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2204 struct sep_driver_get_time_t command_args;
2206 mutex_lock(&sep_mutex);
2207 command_args.time_value = sep_set_time(sep);
2208 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2209 mutex_unlock(&sep_mutex);
2210 if (copy_to_user((void __user *)arg,
2211 &command_args, sizeof(struct sep_driver_get_time_t)))
2218 This API handles the end transaction request
/*
 * Ends the current SEP transaction: in (disabled) interrupt mode it would
 * mask the IMR and free the IRQ; it then releases sep_mutex taken when
 * the transaction started.
 *
 * NOTE(review): elided listing — the #endif matching the #if 0 block and
 * the return statement are missing from view.  Code kept byte-identical.
 */
2220 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2222 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2224 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* mask all interrupt sources in the IMR */
2226 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2228 /* release IRQ line */
2229 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2231 /* unlock the sep mutex (taken for the duration of the transaction) */
2232 mutex_unlock(&sep_mutex);
2235 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2242 * sep_set_flow_id_handler - handle flow setting
2243 * @sep: the SEP we are configuring
2244 * @flow_id: the flow we are setting
2246 * This function handles the set flow id command
/*
 * NOTE(review): elided listing — the NULL-check / error path after
 * sep_find_flow_context() and the return statement are missing from
 * view.  Code kept byte-identical.
 */
2248 static int sep_set_flow_id_handler(struct sep_device *sep,
2249 unsigned long flow_id)
2252 struct sep_flow_context_t *flow_data_ptr;
2254 /* find the flow data structure that was just used for creating new flow
2255 - its id should be default */
2257 mutex_lock(&sep_mutex);
/* the freshly created flow is parked under SEP_TEMP_FLOW_ID until now */
2258 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2260 flow_data_ptr->flow_id = flow_id; /* set flow id */
2263 mutex_unlock(&sep_mutex);
/*
 * unlocked_ioctl entry point: dispatches every SEP_IOC* command to its
 * handler.
 *
 * NOTE(review): elided listing — the 'switch (cmd)' line, the 'break'
 * after each case, the default case and the final 'return error;' are
 * missing from view (embedded numbers jump, e.g. 2280→2282).  Code kept
 * byte-identical; only comments added.
 */
2267 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2270 struct sep_device *sep = filp->private_data;
2272 dbg("------------>SEP Driver: ioctl start\n");
2274 edbg("SEP Driver: cmd is %x\n", cmd);
2277 case SEP_IOCSENDSEPCOMMAND:
2278 /* send command to SEP */
2279 sep_send_command_handler(sep);
2280 edbg("SEP Driver: after sep_send_command_handler\n");
2282 case SEP_IOCSENDSEPRPLYCOMMAND:
2283 /* send reply command to SEP */
2284 sep_send_reply_command_handler(sep);
2286 case SEP_IOCALLOCDATAPOLL:
2287 /* allocate data pool */
2288 error = sep_allocate_data_pool_memory_handler(sep, arg);
2290 case SEP_IOCWRITEDATAPOLL:
2291 /* write data into memory pool */
2292 error = sep_write_into_data_pool_handler(sep, arg);
2294 case SEP_IOCREADDATAPOLL:
2295 /* read data from data pool into application memory */
2296 error = sep_read_from_data_pool_handler(sep, arg);
2298 case SEP_IOCCREATESYMDMATABLE:
2299 /* create dma table for synhronic operation */
2300 error = sep_create_sync_dma_tables_handler(sep, arg);
2302 case SEP_IOCCREATEFLOWDMATABLE:
2303 /* create flow dma tables */
2304 error = sep_create_flow_dma_tables_handler(sep, arg);
2306 case SEP_IOCFREEDMATABLEDATA:
2307 /* free the pages */
2308 error = sep_free_dma_table_data_handler(sep);
2310 case SEP_IOCSETFLOWID:
2312 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2314 case SEP_IOCADDFLOWTABLE:
2315 /* add tables to the dynamic flow */
2316 error = sep_add_flow_tables_handler(sep, arg);
2318 case SEP_IOCADDFLOWMESSAGE:
2319 /* add message of add tables to flow */
2320 error = sep_add_flow_tables_message_handler(sep, arg);
2322 case SEP_IOCSEPSTART:
2323 /* start command to sep */
2324 error = sep_start_handler(sep);
2326 case SEP_IOCSEPINIT:
2327 /* init command to sep */
2328 error = sep_init_handler(sep, arg);
2330 case SEP_IOCGETSTATICPOOLADDR:
2331 /* get the physical and virtual addresses of the static pool */
2332 error = sep_get_static_pool_addr_handler(sep, arg);
2334 case SEP_IOCENDTRANSACTION:
2335 error = sep_end_transaction_handler(sep, arg);
2337 case SEP_IOCREALLOCCACHERES:
2338 error = sep_realloc_cache_resident_handler(sep, arg);
2340 case SEP_IOCGETMAPPEDADDROFFSET:
2341 error = sep_get_physical_mapped_offset_handler(sep, arg);
/* case label elided above — presumably SEP_IOCGETIME; TODO confirm */
2344 error = sep_get_time_handler(sep, arg);
2350 dbg("SEP Driver:<-------- ioctl end\n");
2356 #if !SEP_DRIVER_POLLING_MODE
2358 /* handler for flow done interrupt */
/*
 * Workqueue callback run when the SEP signals a flow completed: frees the
 * flow's in-process DMA tables and, when more input tables are queued,
 * signals the SEP again via GPR2.
 *
 * NOTE(review): elided listing — braces and the closing of the if-blocks
 * are missing from view.  Also note two things to verify against the
 * full source: (1) 'sep' is used below but is not a parameter, so it is
 * presumably a file-scope device pointer; (2) the comment says "copy the
 * message to the shared RAM" but the memcpy arguments copy FROM
 * sep->shared_addr INTO flow_data_ptr->message — dest/src may be
 * swapped relative to the stated intent.
 */
2360 static void sep_flow_done_handler(struct work_struct *work)
2362 struct sep_flow_context_t *flow_data_ptr;
2364 /* obtain the mutex */
2365 mutex_lock(&sep_mutex);
2367 /* get the pointer to context */
/* relies on flow_wq being the first member of sep_flow_context_t;
   container_of() would be the robust idiom here */
2368 flow_data_ptr = (struct sep_flow_context_t *) work;
2370 /* free all the current input tables in sep */
2371 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2373 /* free all the current tables output tables in SEP (if needed) */
/* 0xffffffff acts as the "no table" sentinel for physical_address */
2374 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2375 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2377 /* check if we have additional tables to be sent to SEP only input
2378 flag may be checked */
2379 if (flow_data_ptr->input_tables_flag) {
2380 /* copy the message to the shared RAM and signal SEP */
2381 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
2383 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2385 mutex_unlock(&sep_mutex);
2388 interrupt handler function
/*
 * Shared IRQ handler: reads the IRR to classify the interrupt, queues
 * flow-done work (branch currently compiled out via 'if (0 ...)'), wakes
 * waiters on a reply interrupt (bit 13), and acknowledges by writing the
 * IRR value back to the ICR.
 *
 * NOTE(review): elided listing — the reply-counter update (comment at
 * 2421 with no statement visible), the 'else' returning IRQ_NONE, and
 * the final 'return int_error;' are missing from view.  Code kept
 * byte-identical.
 */
2390 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2392 irqreturn_t int_error;
2393 unsigned long reg_val;
2394 unsigned long flow_id;
2395 struct sep_flow_context_t *flow_context_ptr;
2396 struct sep_device *sep = dev_id;
2398 int_error = IRQ_HANDLED;
2400 /* read the IRR register to check if this is SEP interrupt */
2401 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2402 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2404 /* check if this is the flow interrupt */
/* flow-done path deliberately disabled: condition hard-coded to 0 */
2405 if (0 /*reg_val & (0x1 << 11) */ ) {
2406 /* read GPRO to find out the which flow is done */
/* NOTE(review): comment says GPR0 but the code re-reads the IRR —
   likely a copy/paste slip; verify before enabling this path */
2407 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2409 /* find the contex of the flow */
2410 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2411 if (flow_context_ptr == NULL)
2412 goto end_function_with_error;
2414 /* queue the work */
2415 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2416 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2419 /* check if this is reply interrupt from SEP */
2420 if (reg_val & (0x1 << 13)) {
2421 /* update the counter of reply messages */
2423 /* wake up the waiting process */
2424 wake_up(&sep_event);
2426 int_error = IRQ_NONE;
2430 end_function_with_error:
2431 /* clear the interrupt */
2432 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
/*
 * Polls the SEP busy register until the device is idle.
 * NOTE(review): elided listing — only the prototype and one register
 * read survive (original lines 2444-2447 and the loop are missing);
 * do not infer the loop condition from this view.
 */
2443 static void sep_wait_busy(struct sep_device *sep)
2448 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2453 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * Board workaround: asks the SEP for register access via GPR0, waits for
 * the grant, forces the AHB read/write burst register to single-burst,
 * then releases the SEP-busy handshake.
 *
 * NOTE(review): elided listing — the wait loop between the request and
 * the burst-register write is missing from view (numbers jump
 * 2464→2468).  Code kept byte-identical.
 */
2455 static void sep_configure_dma_burst(struct sep_device *sep)
/* local register offset; defined here because it is only used by this patch */
2457 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2459 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2461 /* request access to registers from SEP */
2462 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2464 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2468 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2470 /* set the DMA burst register to single burst */
2471 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2473 /* release the sep busy */
2474 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2477 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2484 Function that is activated on the successful probe of the SEP device
/*
 * PCI probe: enables the device, allocates the shared area, creates the
 * flow workqueue, maps BAR0 registers, loads ROM code, allocates the RAR
 * DMA region and (in interrupt mode) wires up the IRQ.
 *
 * NOTE(review): elided listing — successful-path 'goto end_function',
 * several error assignments and the final returns are missing from view
 * (embedded numbers jump), so as shown the success path appears to fall
 * through into the error labels; trust the labels' ordering, not the
 * fallthrough.  Code kept byte-identical.
 */
2486 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2489 struct sep_device *sep;
2491 int size; /* size of memory for allocation */
2493 edbg("Sep pci probe starting\n");
/* single-instance driver: sep_dev doubles as the "already probed" flag */
2494 if (sep_dev != NULL) {
2495 dev_warn(&pdev->dev, "only one SEP supported.\n");
2499 /* enable the device */
2500 error = pci_enable_device(pdev);
2502 edbg("error enabling pci device\n");
2506 /* set the pci dev pointer */
2507 sep_dev = &sep_instance;
2508 sep = &sep_instance;
2510 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2511 /* transaction counter that coordinates the transactions between SEP
2514 /* counter for the messages from sep */
2516 /* counter for the number of bytes allocated in the pool
2517 for the current transaction */
2518 sep->data_pool_bytes_allocated = 0;
2520 /* calculate the total size for allocation */
2521 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2522 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2524 /* allocate the shared area */
2525 if (sep_map_and_alloc_shared_area(sep, size)) {
2527 /* allocation failed */
2528 goto end_function_error;
2530 /* now set the memory regions */
2531 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2532 /* Note: this test section will need moving before it could ever
2533 work as the registers are not yet mapped ! */
2534 /* send the new SHARED MESSAGE AREA to the SEP */
2535 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2537 /* poll for SEP response */
2538 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
/* 0xffffffff = SEP reports failure; sep->shared_bus = SEP accepted */
2539 while (retval != 0xffffffff && retval != sep->shared_bus)
2540 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2542 /* check the return value (register) */
2543 if (retval != sep->shared_bus) {
2545 goto end_function_deallocate_sep_shared_area;
2548 /* init the flow contextes */
2549 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2550 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2552 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2553 if (sep->flow_wq == NULL) {
2555 edbg("sep_driver:flow queue creation failed\n");
2556 goto end_function_deallocate_sep_shared_area;
2558 edbg("SEP Driver: create flow workqueue \n");
/* hold a reference on the pci_dev for the lifetime of the driver */
2559 sep->pdev = pci_dev_get(pdev);
2561 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2562 if (!sep->reg_addr) {
2563 edbg("sep: ioremap of registers failed.\n");
2564 goto end_function_deallocate_sep_shared_area;
2566 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2568 /* load the rom code */
2569 sep_load_rom_code(sep);
2571 /* set up system base address and shared memory location */
2572 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2573 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2574 &sep->rar_bus, GFP_KERNEL);
2576 if (!sep->rar_addr) {
2577 edbg("SEP Driver:can't allocate rar\n");
2578 goto end_function_uniomap;
2582 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2583 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2585 #if !SEP_DRIVER_POLLING_MODE
2587 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2589 /* clear ICR register */
2590 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2592 /* set the IMR register - open only GPR 2 */
2593 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2595 edbg("SEP Driver: about to call request_irq\n");
2596 /* get the interrupt line */
2597 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2599 goto end_function_free_res;
2601 edbg("SEP Driver: about to write IMR REG_ADDR");
2603 /* set the IMR register - open only GPR 2 */
2604 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2606 end_function_free_res:
2607 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2608 sep->rar_addr, sep->rar_bus);
2609 #endif /* SEP_DRIVER_POLLING_MODE */
2610 end_function_uniomap:
2611 iounmap(sep->reg_addr);
2612 end_function_deallocate_sep_shared_area:
2613 /* de-allocate shared area */
2614 sep_unmap_and_free_shared_area(sep, size);
/* PCI IDs this driver binds to: Intel SEP security co-processor (0x080c);
   the terminating {0} entry is elided from this view */
2621 static const struct pci_device_id sep_pci_id_tbl[] = {
2622 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2626 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2628 /* field for registering driver to PCI device */
/* NOTE(review): '.probe = sep_probe' and the closing brace appear elided
   from this listing — verify against the full source */
2629 static struct pci_driver sep_pci_driver = {
2630 .name = "sep_sec_driver",
2631 .id_table = sep_pci_id_tbl,
2633 /* FIXME: remove handler */
2636 /* major and minor device numbers */
2637 static dev_t sep_devno;
2639 /* the files operations structure of the driver */
/* .open/.poll/.mmap entries appear elided between .unlocked_ioctl and .release */
2640 static struct file_operations sep_file_operations = {
2641 .owner = THIS_MODULE,
2642 .unlocked_ioctl = sep_ioctl,
2645 .release = sep_release,
2650 /* cdev struct of the driver */
2651 static struct cdev sep_cdev;
2654 this function registers the driver to the file system
/*
 * Allocates a dynamic char-device region (1 minor) and registers the
 * cdev; on cdev_add failure the region is released again.
 *
 * NOTE(review): elided listing — the early-return after a failed
 * alloc_chrdev_region and the final 'return ret_val;' are missing
 * from view.  Code kept byte-identical.
 */
2656 static int sep_register_driver_to_fs(void)
2658 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2660 edbg("sep: major number allocation failed, retval is %d\n",
2665 cdev_init(&sep_cdev, &sep_file_operations);
2666 sep_cdev.owner = THIS_MODULE;
2668 /* register the driver with the kernel */
2669 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2671 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2672 /* unregister dev numbers */
2673 unregister_chrdev_region(sep_devno, 1);
2679 /*--------------------------------------------------------------
2681 ----------------------------------------------------------------*/
/*
 * Module init: registers the PCI driver, then the char-device interface.
 *
 * NOTE(review): elided listing — the success-path 'goto end_function'
 * before the cleanup labels and the final 'return ret_val;' are missing
 * from view, so the success path appears to fall into the unwind code;
 * trust the label ordering (unregister pci, then fs) rather than the
 * apparent fallthrough.
 */
2682 static int __init sep_init(void)
2685 dbg("SEP Driver:-------->Init start\n");
2686 /* FIXME: Probe can occur before we are ready to survive a probe */
2687 ret_val = pci_register_driver(&sep_pci_driver);
2689 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2690 goto end_function_unregister_from_fs;
2692 /* register driver to fs */
2693 ret_val = sep_register_driver_to_fs();
2695 goto end_function_unregister_pci;
2697 end_function_unregister_pci:
2698 pci_unregister_driver(&sep_pci_driver);
2699 end_function_unregister_from_fs:
2700 /* unregister from fs */
2701 cdev_del(&sep_cdev);
2702 /* unregister dev numbers */
2703 unregister_chrdev_region(sep_devno, 1);
2705 dbg("SEP Driver:<-------- Init end\n");
2710 /*-------------------------------------------------------------
2712 --------------------------------------------------------------*/
/*
 * Module exit: tears down the char device, frees the shared area and
 * unmaps the registers of the single sep_dev instance.
 *
 * NOTE(review): elided listing — the 'int size;' declaration, the
 * pci_unregister_driver call and the NULL-check around sep_dev use
 * may be among the missing lines; verify against the full source.
 * FIXME comment below is original: this teardown belongs in the PCI
 * remove handler, not module exit.
 */
2713 static void __exit sep_exit(void)
2717 dbg("SEP Driver:--------> Exit start\n");
2719 /* unregister from fs */
2720 cdev_del(&sep_cdev);
2721 /* unregister dev numbers */
2722 unregister_chrdev_region(sep_devno, 1);
2723 /* calculate the total size for de-allocation */
2724 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2725 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2726 /* FIXME: We need to do this in the unload for the device */
2727 /* free shared area */
2729 sep_unmap_and_free_shared_area(sep_dev, size);
2730 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2731 iounmap((void *) sep_dev->reg_addr);
2732 edbg("SEP Driver: iounmap \n");
2734 edbg("SEP Driver: release_mem_region \n");
2735 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit hookup and license declaration */
2739 module_init(sep_init);
2740 module_exit(sep_exit);
2742 MODULE_LICENSE("GPL");