3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/sched.h>
40 #include <linux/poll.h>
41 #include <linux/wait.h>
42 #include <linux/sched.h>
43 #include <linux/pci.h>
44 #include <linux/firmware.h>
45 #include <linux/slab.h>
46 #include <asm/ioctl.h>
47 #include <linux/ioport.h>
49 #include <linux/interrupt.h>
50 #include <linux/pagemap.h>
51 #include <asm/cacheflush.h>
52 #include "sep_driver_hw_defs.h"
53 #include "sep_driver_config.h"
54 #include "sep_driver_api.h"
57 #if SEP_DRIVER_ARM_DEBUG_MODE
/* ARM debug configuration: geometry of the on-chip ROM image and the
 * addresses/offsets of the ROM-bank select register used by
 * sep_load_rom_code() below. */
59 #define CRYS_SEP_ROM_length 0x4000
60 #define CRYS_SEP_ROM_start_address 0x8000C000UL
61 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
62 #define SEP_ROM_BANK_register 0x80008420UL
63 #define SEP_ROM_BANK_register_offset 0x8420UL
64 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
/* NOTE(review): this view is missing lines — the #else that separates the
 * two definitions of SEP_RAR_IO_MEM_REGION_START_ADDRESS is not visible. */
67 * THESE 2 definitions are specific to the board - must be
68 * defined during integration
70 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * sep_load_rom_code - ARM-debug-only ROM loader.
 * Copies the CRYS_SEP_ROM image (from SEP_ROM_image.h) into the SEP over
 * four banks, resets the SEP, then polls GPR3 for the boot result.
 * NOTE(review): many lines are missing from this view (braces, the
 * declarations of reg/error/warning, the polling loop and the result
 * switch) — comments below only describe what is visible.
 */
74 static void sep_load_rom_code(struct sep_device *sep)
77 unsigned long i, k, j;
/* k = total size of the ROM image in bytes */
82 /* Loading ROM from SEP_ROM_image.h file */
83 k = sizeof(CRYS_SEP_ROM);
85 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
87 edbg("SEP Driver: k is %lu\n", k);
88 edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
89 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
/* four banks of 0x1000 32-bit words each (CRYS_SEP_ROM_length / 4) */
91 for (i = 0; i < 4; i++) {
/* select the destination ROM bank */
93 sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
95 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
96 sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
101 j = CRYS_SEP_ROM_length;
/* kick a software reset so the SEP boots from the freshly loaded ROM */
108 sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
110 /* poll for SEP ROM boot finish */
112 reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
115 edbg("SEP Driver: ROM polling ended\n");
/* presumably the cases below belong to a switch on reg — not visible here */
119 /* fatal error - read erro status from GPRO */
120 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
121 edbg("SEP Driver: ROM polling case 1\n");
124 /* Cold boot ended successfully */
126 /* Warmboot ended successfully */
128 /* ColdWarm boot ended successfully */
131 /* Boot First Phase ended */
132 warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
134 edbg("SEP Driver: ROM polling case %d\n", reg);
/*
 * sep_load_rom_code - no-op stub.
 * ROM loading is only required in the ARM debug configuration; in all
 * other builds there is nothing to do.
 */
static void sep_load_rom_code(struct sep_device *sep)
{
	/* intentionally empty */
}
142 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
146 /*----------------------------------------
148 -----------------------------------------*/
/* base/size of the reserved I/O memory region used for the shared area */
150 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
151 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
153 /*--------------------------------------------
155 --------------------------------------------*/
157 /* debug messages level */
/* NOTE(review): the declaration of 'debug' itself is not visible in this view */
159 module_param(debug, int , 0);
160 MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
162 /* Keep this a single static object for now to keep the conversion easy */
164 static struct sep_device sep_instance;
165 static struct sep_device *sep_dev = &sep_instance;
168 mutex for the access to the internals of the sep driver
170 static DEFINE_MUTEX(sep_mutex);
173 /* wait queue head (event) of the driver */
174 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
177 * sep_load_firmware - copy firmware cache/resident
178 * @sep: device we are loading
180 * This functions copies the cache and resident from their source
181 * location into destination shared memory.
/*
 * sep_load_firmware - copy the cache and resident firmware images into
 * the RAR shared region: cache at the start, resident immediately after.
 * Returns 0 on success or a negative errno — NOTE(review): the error
 * checks/returns after each request_firmware() are missing from this view.
 */
184 static int sep_load_firmware(struct sep_device *sep)
186 const struct firmware *fw;
187 char *cache_name = "sep/cache.image.bin";
188 char *res_name = "sep/resident.image.bin";
191 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
192 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
/* first: the cache image, placed at the base of the RAR */
195 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
197 edbg("SEP Driver:cant request cache fw\n");
200 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
202 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
203 sep->cache_size = fw->size;
204 release_firmware(fw);
/* resident image goes right after the cache, in both bus and virt space */
206 sep->resident_bus = sep->rar_bus + sep->cache_size;
207 sep->resident_addr = sep->rar_addr + sep->cache_size;
210 error = request_firmware(&fw, res_name, &sep->pdev->dev);
212 edbg("SEP Driver:cant request res fw\n");
215 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
217 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
218 sep->resident_size = fw->size;
219 release_firmware(fw);
221 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
222 sep->resident_addr, (unsigned long long)sep->resident_bus,
223 sep->rar_addr, (unsigned long long)sep->rar_bus);
/* advertise the firmware files above so userspace tooling can bundle them */
227 MODULE_FIRMWARE("sep/cache.image.bin");
228 MODULE_FIRMWARE("sep/resident.image.bin");
231 * sep_map_and_alloc_shared_area - allocate shared block
232 * @sep: security processor
233 * @size: size of shared area
235 * Allocate a shared buffer in host memory that can be used by both the
236 * kernel and also the hardware interface via DMA.
/*
 * sep_map_and_alloc_shared_area - allocate the DMA-coherent shared block
 * used by both the kernel and the SEP hardware. Fills sep->shared_addr
 * (kernel virtual) and sep->shared_bus (device/bus address).
 * NOTE(review): the size parameter line and the return statements are
 * missing from this view.
 */
239 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
242 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
243 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
244 &sep->shared_bus, GFP_KERNEL);
246 if (!sep->shared_addr) {
247 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
250 /* set the bus address of the shared area */
251 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
252 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
257 * sep_unmap_and_free_shared_area - free shared block
258 * @sep: security processor
260 * Free the shared area allocated to the security processor. The
261 * processor must have finished with this and any final posted
262 * writes cleared before we do so.
/*
 * sep_unmap_and_free_shared_area - release the DMA-coherent shared block.
 * The SEP must be finished with the area before this is called.
 */
264 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
266 dma_free_coherent(&sep->pdev->dev, size,
267 sep->shared_addr, sep->shared_bus);
271 * sep_shared_virt_to_bus - convert bus/virt addresses
273 * Returns the bus address inside the shared area according
274 * to the virtual address.
/*
 * sep_shared_virt_to_bus - translate a kernel virtual address inside the
 * shared area to the corresponding bus address (same byte offset from the
 * base). NOTE(review): the parameter line, the edbg continuation and the
 * return statement are missing from this view.
 */
277 static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
280 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
281 edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
287 * sep_shared_bus_to_virt - convert bus/virt addresses
289 * Returns virtual address inside the shared area according
290 * to the bus address.
293 static void *sep_shared_bus_to_virt(struct sep_device *sep,
294 dma_addr_t bus_address)
296 return sep->shared_addr + (bus_address - sep->shared_bus);
301 * sep_try_open - attempt to open a SEP device
302 * @sep: device to attempt to open
304 * Atomically attempt to get ownership of a SEP device.
305 * Returns 1 if the device was opened, 0 on failure.
/*
 * sep_try_open - atomically attempt to claim the single SEP device.
 * Per the kdoc above: returns 1 when ownership was obtained, 0 otherwise.
 * NOTE(review): the return statements are missing from this view; only the
 * test_and_set_bit ownership check is visible.
 */
308 static int sep_try_open(struct sep_device *sep)
310 if (!test_and_set_bit(0, &sep->in_use))
316 * sep_open - device open method
317 * @inode: inode of sep device
318 * @filp: file handle to sep device
320 * Open method for the SEP device. Called when userspace opens
321 * the SEP device node. Must also release the memory data pool
324 * Returns zero on success otherwise an error code.
/*
 * sep_open - device open method.
 * Non-blocking opens (O_NDELAY) fail immediately if the device is busy;
 * otherwise the caller sleeps on sep_event until sep_try_open() succeeds.
 * On success the single device instance is bound to the file handle and
 * the data pool allocation counter is reset.
 * NOTE(review): the error-return lines inside both branches are missing
 * from this view.
 */
327 static int sep_open(struct inode *inode, struct file *filp)
332 /* check the blocking mode */
333 if (filp->f_flags & O_NDELAY) {
334 if (sep_try_open(sep_dev) == 0)
/* blocking path: wait until the device is free (interruptible) */
337 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
340 /* Bind to the device, we only have one which makes it easy */
341 filp->private_data = sep_dev;
342 /* release data pool allocations */
343 sep_dev->data_pool_bytes_allocated = 0;
349 * sep_release - close a SEP device
350 * @inode: inode of SEP device
351 * @filp: file handle being closed
353 * Called on the final close of a SEP device. As the open protects against
354 * multiple simultaenous opens that means this method is called when the
355 * final reference to the open handle is dropped.
/*
 * sep_release - final close of the SEP device.
 * Clears the in_use bit so a blocked sep_open() can proceed. The #if 0
 * section is disabled interrupt-mode teardown (mask IRQs, free the line).
 * NOTE(review): the wake_up call for sep_event and the return statement
 * are not visible in this view.
 */
358 static int sep_release(struct inode *inode, struct file *filp)
360 struct sep_device *sep = filp->private_data;
361 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* mask all SEP interrupts before giving up the IRQ line */
363 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
364 /* release IRQ line */
365 free_irq(SEP_DIRVER_IRQ_NUM, sep);
368 /* Ensure any blocked open progresses */
369 clear_bit(0, &sep->in_use);
374 /*---------------------------------------------------------------
375 map function - this functions maps the message shared area
376 -----------------------------------------------------------------*/
/*
 * sep_mmap - map the shared message area into the caller's address space.
 * Rejects requests larger than SEP_DRIVER_MMMAP_AREA_SIZE, then remaps
 * the shared area's bus address into the vma.
 * NOTE(review): the declaration of bus_addr, the error returns and the
 * closing braces are missing from this view.
 */
377 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
380 struct sep_device *sep = filp->private_data;
382 dbg("-------->SEP Driver: mmap start\n");
384 /* check that the size of the mapped range is as the size of the message
386 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
387 edbg("SEP Driver mmap requested size is more than allowed\n");
388 printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
389 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
/* FIXME(review): copy-paste bug — message says vm_end but the value
 * printed is vma->vm_start */
390 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
394 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
396 /* get bus address */
397 bus_addr = sep->shared_bus;
399 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
401 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
402 edbg("SEP Driver remap_page_range failed\n");
403 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
407 dbg("SEP Driver:<-------- mmap end\n");
413 /*-----------------------------------------------
415 *----------------------------------------------*/
/*
 * sep_poll - poll method for the SEP device.
 * In polling mode it busy-reads GPR2 until the SEP's counter matches
 * send_ct; in interrupt mode it registers on sep_event. When send_ct ==
 * reply_ct the shared-area message is dumped and GPR2 decides whether
 * this is a SEP request (POLLOUT) or a SEP reply (POLLIN).
 * NOTE(review): the declaration of count, the #else/#endif, the branch
 * condition on retval and the return of mask are missing from this view.
 */
416 static unsigned int sep_poll(struct file *filp, poll_table * wait)
419 unsigned int mask = 0;
420 unsigned long retval = 0; /* flow id */
421 struct sep_device *sep = filp->private_data;
423 dbg("---------->SEP Driver poll: start\n");
426 #if SEP_DRIVER_POLLING_MODE
/* busy-wait for the SEP to acknowledge our send counter in GPR2 */
428 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
429 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
431 for (count = 0; count < 10 * 4; count += 4)
432 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
437 /* add the event to the polling wait table */
438 poll_wait(filp, &sep_event, wait);
442 edbg("sep->send_ct is %lu\n", sep->send_ct);
443 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
445 /* check if the data is ready */
446 if (sep->send_ct == sep->reply_ct) {
447 for (count = 0; count < 12 * 4; count += 4)
448 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
450 for (count = 0; count < 10 * 4; count += 4)
451 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
/* GPR2 distinguishes a new SEP request from a reply to ours */
453 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
454 edbg("retval is %lu\n", retval);
455 /* check if the this is sep reply or request */
457 edbg("SEP Driver: sep request in\n");
459 mask |= POLLOUT | POLLWRNORM;
461 edbg("SEP Driver: sep reply in\n");
462 mask |= POLLIN | POLLRDNORM;
465 dbg("SEP Driver:<-------- poll exit\n");
470 * sep_time_address - address in SEP memory of time
471 * @sep: SEP device we want the address from
473 * Return the address of the two dwords in memory used for time
477 static u32 *sep_time_address(struct sep_device *sep)
479 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
483 * sep_set_time - set the SEP time
484 * @sep: the SEP we are setting the time for
486 * Calculates time and sets it at the predefined address.
487 * Called with the sep mutex held.
/*
 * sep_set_time - write the current time into the shared area.
 * Stores the SEP_TIME_VAL_TOKEN marker followed by the seconds value at
 * sep_time_address(). Called with sep_mutex held (per the kdoc above).
 * NOTE(review): the declaration of 'time' and the return statement are
 * missing from this view; presumably time.tv_sec is returned — confirm.
 */
489 static unsigned long sep_set_time(struct sep_device *sep)
492 u32 *time_addr; /* address of time as seen by the kernel */
495 dbg("sep:sep_set_time start\n");
497 do_gettimeofday(&time);
499 /* set value in the SYSTEM MEMORY offset */
500 time_addr = sep_time_address(sep);
/* token first so the SEP can validate the time record */
502 time_addr[0] = SEP_TIME_VAL_TOKEN;
503 time_addr[1] = time.tv_sec;
505 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
506 edbg("SEP Driver:time_addr is %p\n", time_addr);
507 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
513 * sep_dump_message - dump the message that is pending
516 * Dump out the message pending in the shared message area
/*
 * sep_dump_message - debug-dump the first 12 32-bit words of the pending
 * message at the start of the shared area.
 * NOTE(review): the declaration of 'count' is missing from this view.
 */
519 static void sep_dump_message(struct sep_device *sep)
522 for (count = 0; count < 12 * 4; count += 4)
523 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
527 * sep_send_command_handler - kick off a command
528 * @sep: sep being signalled
530 * This function raises interrupt to SEP that signals that is has a new
531 * command from the host
/*
 * sep_send_command_handler - signal the SEP that a new host command is in
 * the shared area by writing 0x2 to GPR0, under sep_mutex.
 * NOTE(review): lines between mutex_lock and sep_dump_message (including
 * whatever the FIXME refers to) are missing from this view.
 */
534 static void sep_send_command_handler(struct sep_device *sep)
536 dbg("sep:sep_send_command_handler start\n");
538 mutex_lock(&sep_mutex);
541 /* FIXME: flush cache */
544 sep_dump_message(sep);
547 /* send interrupt to SEP */
548 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
549 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
550 mutex_unlock(&sep_mutex);
555 * sep_send_reply_command_handler - kick off a command reply
556 * @sep: sep being signalled
558 * This function raises interrupt to SEP that signals that is has a new
559 * command from the host
/*
 * sep_send_reply_command_handler - signal the SEP that a reply is ready:
 * bump send_ct and write it to GPR2, under sep_mutex.
 * NOTE(review): the "update both counters" line(s) after the register
 * write are missing from this view.
 */
562 static void sep_send_reply_command_handler(struct sep_device *sep)
564 dbg("sep:sep_send_reply_command_handler start\n");
569 sep_dump_message(sep);
571 mutex_lock(&sep_mutex);
572 sep->send_ct++; /* update counter */
573 /* send the interrupt to SEP */
574 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
575 /* update both counters */
578 mutex_unlock(&sep_mutex);
579 dbg("sep: sep_send_reply_command_handler end\n");
583 This function handles the allocate data pool memory request
584 This function returns calculates the bus address of the
585 allocated memory, and the offset of this area from the mapped address.
586 Therefore, the FVOs in user space can calculate the exact virtual
587 address of this allocated memory
/*
 * sep_allocate_data_pool_memory_handler - ioctl backend: bump-allocate
 * command_args.num_bytes from the shared data pool and return the pool
 * offset plus bus address to userspace.
 * NOTE(review): the arg parameter line, the error declaration/returns and
 * the "no room" branch body are missing from this view. The allocation is
 * a simple bump pointer — nothing is ever freed except via sep_open().
 */
589 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
593 struct sep_driver_alloc_t command_args;
595 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
597 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
601 /* allocate memory */
602 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
607 /* set the virtual and bus address */
608 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
609 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
611 /* write the memory back to the user space */
612 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
615 /* set the allocation */
617 sep->data_pool_bytes_allocated += command_args.num_bytes;
620 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
625 This function handles write into allocated data pool command
/*
 * sep_write_into_data_pool_handler - ioctl backend: copy num_bytes from a
 * userspace buffer into a previously allocated region of the shared data
 * pool, after range-checking the destination against the pool bounds.
 * NOTE(review): declarations of error/va/virt_address and the error
 * returns are missing from this view. The range check does not account
 * for num_bytes (virt_address + num_bytes may exceed the pool) — matches
 * the FIXME visible in sep_read_from_data_pool_handler.
 */
627 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
632 unsigned long app_in_address;
633 unsigned long num_bytes;
634 void *data_pool_area_addr;
636 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
638 /* get the application address */
639 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
643 /* get the virtual kernel address address */
644 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
647 virt_address = (void *)va;
649 /* get the number of bytes */
650 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
654 /* calculate the start of the data pool */
655 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
658 /* check that the range of the virtual kernel address is correct */
659 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
663 /* copy the application data */
664 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
666 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
671 this function handles the read from data pool command
/*
 * sep_read_from_data_pool_handler - ioctl backend: copy num_bytes from a
 * region of the shared data pool out to a userspace buffer, mirror image
 * of sep_write_into_data_pool_handler.
 * NOTE(review): declarations of error/va/virt_address and the error
 * returns are missing from this view.
 */
673 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
676 /* virtual address of dest application buffer */
677 unsigned long app_out_address;
678 /* virtual address of the data pool */
681 unsigned long num_bytes;
682 void *data_pool_area_addr;
684 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
686 /* get the application address */
687 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
691 /* get the virtual kernel address address */
692 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
695 virt_address = (void *)va;
697 /* get the number of bytes */
698 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
702 /* calculate the start of the data pool */
703 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
705 /* FIXME: These are incomplete all over the driver: what about + len
706 and when doing that also overflows */
707 /* check that the range of the virtual kernel address is correct */
708 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
713 /* copy the application data */
714 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
716 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
721 This function releases all the application virtual buffer physical pages,
722 that were previously locked
/*
 * sep_free_dma_pages - release pages previously pinned with
 * get_user_pages. When dirtyFlag is set (presumably — the branch on it is
 * not visible here) pages are marked dirty before release, since the
 * device wrote into them; otherwise they are released untouched. Frees
 * the page array itself at the end.
 * NOTE(review): declarations of count, the dirtyFlag branch, closing
 * braces and the return statement are missing from this view.
 */
724 static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
729 for (count = 0; count < num_pages; count++) {
730 /* the out array was written, therefore the data was changed */
731 if (!PageReserved(page_array_ptr[count]))
732 SetPageDirty(page_array_ptr[count]);
733 page_cache_release(page_array_ptr[count]);
736 /* free in pages - the data was only read, therefore no update was done
738 for (count = 0; count < num_pages; count++)
739 page_cache_release(page_array_ptr[count]);
744 kfree(page_array_ptr);
750 This function locks all the physical pages of the kernel virtual buffer
751 and construct a basic lli array, where each entry holds the physical
752 page address and the size that application data holds in this physical pages
/*
 * sep_lock_kernel_pages - build an LLI (linked list item) array describing
 * the physical pages backing a kernel virtual buffer: one entry per page
 * with its physical address and the number of buffer bytes it holds. The
 * first and last entries are trimmed to the buffer's actual start/end
 * within their pages. Outputs the array and page count via the pointers.
 * NOTE(review): declarations of count, the kmalloc-failure return, braces
 * and the final return are missing from this view.
 */
754 static int sep_lock_kernel_pages(struct sep_device *sep,
755 unsigned long kernel_virt_addr,
756 unsigned long data_size,
757 unsigned long *num_pages_ptr,
758 struct sep_lli_entry_t **lli_array_ptr,
759 struct page ***page_array_ptr)
762 /* the the page of the end address of the user space buffer */
763 unsigned long end_page;
764 /* the page of the start address of the user space buffer */
765 unsigned long start_page;
766 /* the range in pages */
767 unsigned long num_pages;
768 struct sep_lli_entry_t *lli_array;
769 /* next kernel address to map */
770 unsigned long next_kernel_address;
773 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
775 /* set start and end pages and num pages */
776 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
777 start_page = kernel_virt_addr >> PAGE_SHIFT;
778 num_pages = end_page - start_page + 1;
780 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
781 edbg("SEP Driver: data_size is %lu\n", data_size);
782 edbg("SEP Driver: start_page is %lx\n", start_page);
783 edbg("SEP Driver: end_page is %lx\n", end_page);
784 edbg("SEP Driver: num_pages is %lu\n", num_pages);
786 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
788 edbg("SEP Driver: kmalloc for lli_array failed\n");
793 /* set the start address of the first page - app data may start not at
794 the beginning of the page */
795 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
797 /* check that not all the data is in the first page only */
798 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
799 lli_array[0].block_size = data_size;
801 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
804 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
806 /* advance the address to the start of the next page */
807 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
809 /* go from the second page to the prev before last */
810 for (count = 1; count < (num_pages - 1); count++) {
811 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
812 lli_array[count].block_size = PAGE_SIZE;
814 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
815 next_kernel_address += PAGE_SIZE;
818 /* if more then 1 pages locked - then update for the last page size needed */
820 /* update the address of the last page */
821 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
823 /* set the size of the last page */
824 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
826 if (lli_array[count].block_size == 0) {
827 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
828 dbg("data_size is %lu\n", data_size);
832 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
834 /* set output params */
835 *lli_array_ptr = lli_array;
836 *num_pages_ptr = num_pages;
839 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
844 This function locks all the physical pages of the application virtual buffer
845 and construct a basic lli array, where each entry holds the physical page
846 address and the size that application data holds in this physical pages
/*
 * sep_lock_user_pages - pin the physical pages of a userspace buffer with
 * get_user_pages (write access, under mmap_sem), flush each page from the
 * data cache, and build an LLI array (physical address + byte count per
 * page) describing it, with first/last entries trimmed to the buffer's
 * actual bounds. On partial pinning the already-pinned pages are released
 * and the allocations freed via the error labels.
 * NOTE(review): declarations of count/result, the failure branches'
 * return-value assignments, the kfree lines under error1 and the final
 * return are missing from this view.
 */
848 static int sep_lock_user_pages(struct sep_device *sep,
849 unsigned long app_virt_addr,
850 unsigned long data_size,
851 unsigned long *num_pages_ptr,
852 struct sep_lli_entry_t **lli_array_ptr,
853 struct page ***page_array_ptr)
856 /* the the page of the end address of the user space buffer */
857 unsigned long end_page;
858 /* the page of the start address of the user space buffer */
859 unsigned long start_page;
860 /* the range in pages */
861 unsigned long num_pages;
862 struct page **page_array;
863 struct sep_lli_entry_t *lli_array;
867 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
869 /* set start and end pages and num pages */
870 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
871 start_page = app_virt_addr >> PAGE_SHIFT;
872 num_pages = end_page - start_page + 1;
874 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
875 edbg("SEP Driver: data_size is %lu\n", data_size);
876 edbg("SEP Driver: start_page is %lu\n", start_page);
877 edbg("SEP Driver: end_page is %lu\n", end_page);
878 edbg("SEP Driver: num_pages is %lu\n", num_pages);
880 /* allocate array of pages structure pointers */
881 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
883 edbg("SEP Driver: kmalloc for page_array failed\n");
889 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
891 edbg("SEP Driver: kmalloc for lli_array failed\n");
894 goto end_function_with_error1;
897 /* convert the application virtual address into a set of physical */
898 down_read(&current->mm->mmap_sem);
899 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
900 up_read(&current->mm->mmap_sem);
902 /* check the number of pages locked - if not all then exit with error */
903 if (result != num_pages) {
904 dbg("SEP Driver: not all pages locked by get_user_pages\n");
907 goto end_function_with_error2;
910 /* flush the cache */
911 for (count = 0; count < num_pages; count++)
912 flush_dcache_page(page_array[count]);
914 /* set the start address of the first page - app data may start not at
915 the beginning of the page */
916 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
918 /* check that not all the data is in the first page only */
919 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
920 lli_array[0].block_size = data_size;
922 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
925 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
927 /* go from the second page to the prev before last */
928 for (count = 1; count < (num_pages - 1); count++) {
929 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
930 lli_array[count].block_size = PAGE_SIZE;
932 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
935 /* if more then 1 pages locked - then update for the last page size needed */
937 /* update the address of the last page */
938 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
940 /* set the size of the last page */
941 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
943 if (lli_array[count].block_size == 0) {
944 dbg("app_virt_addr is %08lx\n", app_virt_addr);
945 dbg("data_size is %lu\n", data_size);
948 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
949 count, lli_array[count].physical_address,
950 count, lli_array[count].block_size);
953 /* set output params */
954 *lli_array_ptr = lli_array;
955 *num_pages_ptr = num_pages;
956 *page_array_ptr = page_array;
/* error unwind: drop page references, then free the arrays */
959 end_function_with_error2:
960 /* release the cache */
961 for (count = 0; count < num_pages; count++)
962 page_cache_release(page_array[count]);
964 end_function_with_error1:
967 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
973 this function calculates the size of data that can be inserted into the lli
974 table from this array the condition is that either the table is full
975 (all etnries are entered), or there are no more entries in the lli array
977 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
979 unsigned long table_data_size = 0;
980 unsigned long counter;
982 /* calculate the data in the out lli table if till we fill the whole
983 table or till the data has ended */
984 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
985 table_data_size += lli_in_array_ptr[counter].block_size;
986 return table_data_size;
990 this functions builds ont lli table from the lli_array according to
991 the given size of data
/*
 * sep_build_lli_table - fill one LLI table from the LLI array until
 * table_data_size bytes are covered. If the last entry overshoots, its
 * block_size is trimmed and the remainder is left in the source array
 * entry (address advanced, size reduced) for the next table. The table is
 * terminated with an info entry (address 0xffffffff, size 0) and the
 * number of consumed array entries is added to *num_processed_entries_ptr.
 * NOTE(review): the initialization of array_counter, the increments of
 * array_counter/lli_table_ptr and several braces are missing from this
 * view.
 */
993 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
995 unsigned long curr_table_data_size;
996 /* counter of lli array entry */
997 unsigned long array_counter;
999 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1001 /* init currrent table data size and lli array entry counter */
1002 curr_table_data_size = 0;
/* the count starts at 1 to reserve room for the trailing info entry */
1004 *num_table_entries_ptr = 1;
1006 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1008 /* fill the table till table size reaches the needed amount */
1009 while (curr_table_data_size < table_data_size) {
1010 /* update the number of entries in table */
1011 (*num_table_entries_ptr)++;
1013 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1014 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1015 curr_table_data_size += lli_table_ptr->block_size;
1017 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1018 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1019 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1021 /* check for overflow of the table data */
1022 if (curr_table_data_size > table_data_size) {
1023 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1025 /* update the size of block in the table */
1026 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1028 /* update the physical address in the lli array */
1029 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1031 /* update the block size left in the lli array */
1032 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1034 /* advance to the next entry in the lli_array */
1037 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1038 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1040 /* move to the next entry in table */
1044 /* set the info entry to default */
1045 lli_table_ptr->physical_address = 0xffffffff;
1046 lli_table_ptr->block_size = 0;
1048 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1049 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1050 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1052 /* set the output parameter */
1053 *num_processed_entries_ptr += array_counter;
1055 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1056 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1061 this function goes over the list of the print created tables and
/*
 * sep_debug_print_lli_tables - debug-walk a chain of LLI tables, printing
 * each data entry, then follow the info entry: its block_size packs the
 * next table's data size (low 24 bits) and entry count (high 8 bits), and
 * its physical_address is the bus address of the next table (translated
 * back to a virtual pointer; 0xffffffff terminates the chain).
 * NOTE(review): the initialization of table_count, its increment and
 * closing braces are missing from this view.
 */
1064 static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1066 unsigned long table_count;
1067 unsigned long entries_count;
1069 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1072 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1073 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1074 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1076 /* print entries of the table (without info entry) */
1077 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1078 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1079 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1082 /* point to the info entry */
1085 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1086 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
/* unpack next-table metadata from the info entry */
1089 table_data_size = lli_table_ptr->block_size & 0xffffff;
1090 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1091 lli_table_ptr = (struct sep_lli_entry_t *)
1092 (lli_table_ptr->physical_address);
1094 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1096 if ((unsigned long) lli_table_ptr != 0xffffffff)
1097 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1101 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
/*
 * sep_prepare_input_dma_table - build the input-only LLI (DMA) table
 * chain for a synchronous symmetric operation.
 *
 * @sep:                  device context
 * @app_virt_addr:        user (or kernel, per flag) virtual address of data
 * @data_size:            total size of the data buffer in bytes
 * @block_size:           cipher block size; each table's data size is
 *                        rounded down to a multiple of it
 * @lli_table_ptr:        out - bus address of the first LLI table
 * @num_entries_ptr:      out - number of entries in the first table
 * @table_data_size_ptr:  out - data size covered by the first table
 * @isKernelVirtualAddress: true if the buffer is a kernel virtual address
 *
 * Fix applied: the third argument to sep_build_lli_table() had been
 * mojibake-corrupted ("&curr" rendered as the U+00A4 currency sign by an
 * HTML "&curren;" entity conversion); restored to &current_entry, which
 * is the declared cursor variable this loop advances.
 */
1106 This function prepares only input DMA table for synhronic symmetric
1109 static int sep_prepare_input_dma_table(struct sep_device *sep,
1110 unsigned long app_virt_addr,
1111 unsigned long data_size,
1112 unsigned long block_size,
1113 unsigned long *lli_table_ptr,
1114 unsigned long *num_entries_ptr,
1115 unsigned long *table_data_size_ptr,
1116 bool isKernelVirtualAddress)
1118 /* pointer to the info entry of the table - the last entry */
1119 struct sep_lli_entry_t *info_entry_ptr;
1120 /* array of pointers ot page */
1121 struct sep_lli_entry_t *lli_array_ptr;
1122 /* points to the first entry to be processed in the lli_in_array */
1123 unsigned long current_entry;
1124 /* num entries in the virtual buffer */
1125 unsigned long sep_lli_entries;
1126 /* lli table pointer */
1127 struct sep_lli_entry_t *in_lli_table_ptr;
1128 /* the total data in one table */
1129 unsigned long table_data_size;
1130 /* number of entries in lli table */
1131 unsigned long num_entries_in_table;
1132 /* next table address */
1133 void *lli_table_alloc_addr;
1134 unsigned long result;
1136 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1138 edbg("SEP Driver:data_size is %lu\n", data_size);
1139 edbg("SEP Driver:block_size is %lu\n", block_size);
1141 /* initialize the pages pointers */
1142 sep->in_page_array = 0;
1143 sep->in_num_pages = 0;
1145 if (data_size == 0) {
1146 /* special case - created 2 entries table with zero data */
1147 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1148 /* FIXME: Should the entry below not be for _bus */
1149 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1150 in_lli_table_ptr->block_size = 0;
1153 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1154 in_lli_table_ptr->block_size = 0;
1156 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1157 *num_entries_ptr = 2;
1158 *table_data_size_ptr = 0;
1163 /* check if the pages are in Kernel Virtual Address layout */
1164 if (isKernelVirtualAddress == true)
1165 /* lock the pages of the kernel buffer and translate them to pages */
1166 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1168 /* lock the pages of the user buffer and translate them to pages */
1169 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1174 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1178 sep_lli_entries = sep->in_num_pages;
1180 /* initiate to point after the message area */
1181 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1183 /* loop till all the entries in in array are not processed */
1184 while (current_entry < sep_lli_entries) {
1185 /* set the new input and output tables */
1186 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1188 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1190 /* calculate the maximum size of data for input table */
1191 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1193 /* now calculate the table size so that it will be module block size */
1194 table_data_size = (table_data_size / block_size) * block_size;
1196 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1198 /* construct input lli table */
1199 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1201 if (info_entry_ptr == 0) {
1202 /* set the output parameters to physical addresses */
1203 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1204 *num_entries_ptr = num_entries_in_table;
1205 *table_data_size_ptr = table_data_size;
1207 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1209 /* update the info entry of the previous in table */
1210 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1211 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1214 /* save the pointer to the info entry of the current tables */
1215 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1218 /* print input tables */
1219 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1220 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1222 /* the array of the pages */
1223 kfree(lli_array_ptr);
1225 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
/*
 * sep_construct_dma_tables_from_lli - build paired input and output LLI
 * table chains from two LLI arrays for symmetric ops (AES/DES).
 *
 * Each iteration allocates one input and one output table from the
 * shared area, sizes both to the smaller of the two remaining data
 * spans, rounds that down to a multiple of block_size, and links the
 * new tables into the previous tables' info entries.
 *
 * Fix applied: the third arguments to both sep_build_lli_table() calls
 * had been mojibake-corrupted ("&curr" rendered as the U+00A4 currency
 * sign by an HTML "&curren;" entity conversion); restored to
 * &current_in_entry / &current_out_entry, the declared loop cursors.
 */
1231 This function creates the input and output dma tables for
1232 symmetric operations (AES/DES) according to the block size from LLI arays
1234 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1235 struct sep_lli_entry_t *lli_in_array,
1236 unsigned long sep_in_lli_entries,
1237 struct sep_lli_entry_t *lli_out_array,
1238 unsigned long sep_out_lli_entries,
1239 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1241 /* points to the area where next lli table can be allocated: keep void *
1242 as there is pointer scaling to fix otherwise */
1243 void *lli_table_alloc_addr;
1244 /* input lli table */
1245 struct sep_lli_entry_t *in_lli_table_ptr;
1246 /* output lli table */
1247 struct sep_lli_entry_t *out_lli_table_ptr;
1248 /* pointer to the info entry of the table - the last entry */
1249 struct sep_lli_entry_t *info_in_entry_ptr;
1250 /* pointer to the info entry of the table - the last entry */
1251 struct sep_lli_entry_t *info_out_entry_ptr;
1252 /* points to the first entry to be processed in the lli_in_array */
1253 unsigned long current_in_entry;
1254 /* points to the first entry to be processed in the lli_out_array */
1255 unsigned long current_out_entry;
1256 /* max size of the input table */
1257 unsigned long in_table_data_size;
1258 /* max size of the output table */
1259 unsigned long out_table_data_size;
1260 /* flag te signifies if this is the first tables build from the arrays */
1261 unsigned long first_table_flag;
1262 /* the data size that should be in table */
1263 unsigned long table_data_size;
1264 /* number of etnries in the input table */
1265 unsigned long num_entries_in_table;
1266 /* number of etnries in the output table */
1267 unsigned long num_entries_out_table;
1269 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1271 /* initiate to pint after the message area */
1272 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1274 current_in_entry = 0;
1275 current_out_entry = 0;
1276 first_table_flag = 1;
1277 info_in_entry_ptr = 0;
1278 info_out_entry_ptr = 0;
1280 /* loop till all the entries in in array are not processed */
1281 while (current_in_entry < sep_in_lli_entries) {
1282 /* set the new input and output tables */
1283 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1285 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1287 /* set the first output tables */
1288 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1290 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1292 /* calculate the maximum size of data for input table */
1293 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1295 /* calculate the maximum size of data for output table */
1296 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1298 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1299 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1301 /* check where the data is smallest */
1302 table_data_size = in_table_data_size;
1303 if (table_data_size > out_table_data_size)
1304 table_data_size = out_table_data_size;
1306 /* now calculate the table size so that it will be module block size */
1307 table_data_size = (table_data_size / block_size) * block_size;
1309 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1311 /* construct input lli table */
1312 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1314 /* construct output lli table */
1315 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1317 /* if info entry is null - this is the first table built */
1318 if (info_in_entry_ptr == 0) {
1319 /* set the output parameters to physical addresses */
1320 *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1321 *in_num_entries_ptr = num_entries_in_table;
1322 *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1323 *out_num_entries_ptr = num_entries_out_table;
1324 *table_data_size_ptr = table_data_size;
1326 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1327 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1329 /* update the info entry of the previous in table */
1330 info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1331 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1333 /* update the info entry of the previous in table */
1334 info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1335 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1338 /* save the pointer to the info entry of the current tables */
1339 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1340 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1342 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1343 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1344 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1347 /* print input tables */
1348 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1349 sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1350 /* print output tables */
1351 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1352 sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1353 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
/*
 * sep_prepare_input_output_dma_table - lock the input and output user
 * (or kernel) buffers, build LLI arrays for both, and construct the
 * paired DMA table chains via sep_construct_dma_tables_from_lli().
 * On any failure after a lock succeeds, the goto-cleanup labels free
 * the LLI arrays that were already allocated.
 */
1359 This function builds input and output DMA tables for synhronic
1360 symmetric operations (AES, DES). It also checks that each table
1361 is of the modular block size
1363 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1364 unsigned long app_virt_in_addr,
1365 unsigned long app_virt_out_addr,
1366 unsigned long data_size,
1367 unsigned long block_size,
1368 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1370 /* array of pointers of page */
1371 struct sep_lli_entry_t *lli_in_array;
1372 /* array of pointers of page */
1373 struct sep_lli_entry_t *lli_out_array;
1376 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1378 /* initialize the pages pointers */
1379 sep->in_page_array = 0;
1380 sep->out_page_array = 0;
1382 /* check if the pages are in Kernel Virtual Address layout */
1383 if (isKernelVirtualAddress == true) {
1384 /* lock the pages of the kernel buffer and translate them to pages */
1385 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1387 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1391 /* lock the pages of the user buffer and translate them to pages */
1392 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1394 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* second stage: lock the output buffer the same way */
1399 if (isKernelVirtualAddress == true) {
1400 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1402 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1403 goto end_function_with_error1;
1406 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1408 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1409 goto end_function_with_error1;
1412 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1413 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1414 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1417 /* call the fucntion that creates table from the lli arrays */
1418 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1420 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1421 goto end_function_with_error2;
1424 /* fall through - free the lli entry arrays */
1425 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1426 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1427 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1428 end_function_with_error2:
1429 kfree(lli_out_array);
1430 end_function_with_error1:
1431 kfree(lli_in_array);
1433 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
/*
 * sep_create_sync_dma_tables_handler - ioctl handler that builds the
 * synchronous DMA tables. Copies the argument struct from user space,
 * builds an input-only table chain or paired input/output chains
 * depending on whether app_out_address is non-zero, then copies the
 * resulting table addresses/sizes back to user space.
 */
1439 this function handles tha request for creation of the DMA table
1440 for the synchronic symmetric operations (AES,DES)
1442 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1446 /* command arguments */
1447 struct sep_driver_build_sync_table_t command_args;
1449 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1451 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1455 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1456 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1457 edbg("data_size is %lu\n", command_args.data_in_size);
1458 edbg("block_size is %lu\n", command_args.block_size);
1460 /* check if we need to build only input table or input/output */
1461 if (command_args.app_out_address)
1462 /* prepare input and output tables */
1463 error = sep_prepare_input_output_dma_table(sep,
1464 command_args.app_in_address,
1465 command_args.app_out_address,
1466 command_args.data_in_size,
1467 command_args.block_size,
1468 &command_args.in_table_address,
1469 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1471 /* prepare input tables */
1472 error = sep_prepare_input_dma_table(sep,
1473 command_args.app_in_address,
1474 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* return the filled-in table descriptors to the caller */
1479 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1482 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
/*
 * sep_free_dma_table_data_handler - release the page arrays that were
 * pinned for the synchronous DMA tables and reset the bookkeeping in
 * the device context. Input pages are released without dirtying them
 * (flag 0); output pages are marked dirty before release (flag 1)
 * since the SEP wrote into them.
 */
1487 this function handles the request for freeing dma table for synhronic actions
1489 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1491 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1493 /* free input pages array */
1494 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1496 /* free output pages array if needed */
1497 if (sep->out_page_array)
1498 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1500 /* reset all the values */
1501 sep->in_page_array = 0;
1502 sep->out_page_array = 0;
1503 sep->in_num_pages = 0;
1504 sep->out_num_pages = 0;
1505 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
/*
 * sep_find_free_flow_dma_table_space - scan the flow DMA table area of
 * the shared region for a free table slot and return its address via
 * @table_address_ptr. A slot is free when its first word, masked with
 * 0x7FFFFFFF, is zero; occupied tables store the page count there.
 */
1510 this function find a space for the new flow dma table
1512 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1513 unsigned long **table_address_ptr)
1516 /* pointer to the id field of the flow dma table */
1517 unsigned long *start_table_ptr;
1518 /* Do not make start_addr unsigned long * unless fixing the offset
1520 void *flow_dma_area_start_addr;
1521 unsigned long *flow_dma_area_end_addr;
1522 /* maximum table size in words */
1523 unsigned long table_size_in_words;
1525 /* find the start address of the flow DMA table area */
1526 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1528 /* set end address of the flow table area */
1529 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1531 /* set table size in words */
/* +2 words: one for the page count, one for the page-array pointer */
1532 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1534 /* set the pointer to the start address of DMA area */
1535 start_table_ptr = flow_dma_area_start_addr;
1537 /* find the space for the next table */
1538 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1539 start_table_ptr += table_size_in_words;
1541 /* check if we reached the end of floa tables area */
1542 if (start_table_ptr >= flow_dma_area_end_addr)
1545 *table_address_ptr = start_table_ptr;
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA table from
 * one virtual buffer. Locks the buffer's pages, claims a free slot in
 * the flow table area, writes the page count and page-array pointer in
 * the two header words, fills in one LLI entry per page, terminates
 * with an info entry (physical_address 0xffffffff), and returns the
 * table's bus address + packed size/count in @table_data and a pointer
 * to the info entry in @info_entry_ptr for later chaining.
 */
1551 This function creates one DMA table for flow and returns its data,
1552 and pointer to its info entry
1554 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1555 unsigned long virt_buff_addr,
1556 unsigned long virt_buff_size,
1557 struct sep_lli_entry_t *table_data,
1558 struct sep_lli_entry_t **info_entry_ptr,
1559 struct sep_flow_context_t *flow_data_ptr,
1560 bool isKernelVirtualAddress)
1563 /* the range in pages */
1564 unsigned long lli_array_size;
1565 struct sep_lli_entry_t *lli_array;
1566 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1567 unsigned long *start_dma_table_ptr;
1568 /* total table data counter */
1569 unsigned long dma_table_data_count;
1570 /* pointer that will keep the pointer to the pages of the virtual buffer */
1571 struct page **page_array_ptr;
1572 unsigned long entry_count;
1574 /* find the space for the new table */
1575 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1579 /* check if the pages are in Kernel Virtual Address layout */
1580 if (isKernelVirtualAddress == true)
1581 /* lock kernel buffer in the memory */
1582 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr)
1584 /* lock user buffer in the memory */
1585 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1590 /* set the pointer to page array at the beginning of table - this table is
1591 now considered taken */
/* writing a non-zero page count here marks the slot as occupied */
1592 *start_dma_table_ptr = lli_array_size;
1594 /* point to the place of the pages pointers of the table */
1595 start_dma_table_ptr++;
1597 /* set the pages pointer */
1598 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1600 /* set the pointer to the first entry */
1601 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1603 /* now create the entries for table */
1604 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1605 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1607 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1609 /* set the total data of a table */
1610 dma_table_data_count += lli_array[entry_count].block_size;
1612 flow_dma_table_entry_ptr++;
1615 /* set the physical address */
1616 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1618 /* set the num_entries and total data size */
/* entry count (+1 for the info entry) packed above the total byte count */
1619 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1621 /* set the info entry */
1622 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1623 flow_dma_table_entry_ptr->block_size = 0;
1625 /* set the pointer to info entry */
1626 *info_entry_ptr = flow_dma_table_entry_ptr;
1628 /* the array of the lli entries */
/*
 * sep_prepare_flow_dma_tables - build a linked list of flow DMA tables
 * from an array of (address, size) pairs located at @first_buff_addr in
 * user space. Each pair is read with get_user(), one table is built per
 * buffer, and each new table is chained into the previous table's info
 * entry. The first table's descriptor is returned through
 * @first_table_data_ptr and the last through @last_table_data_ptr.
 */
1637 This function creates a list of tables for flow and returns the data for
1638 the first and last tables of the list
1640 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1641 unsigned long num_virtual_buffers,
1642 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1645 unsigned long virt_buff_addr;
1646 unsigned long virt_buff_size;
1647 struct sep_lli_entry_t table_data;
1648 struct sep_lli_entry_t *info_entry_ptr;
1649 struct sep_lli_entry_t *prev_info_entry_ptr;
1654 prev_info_entry_ptr = 0;
1656 /* init the first table to default */
/* 0xffffffff marks "no table built yet" for the cleanup path */
1657 table_data.physical_address = 0xffffffff;
1658 first_table_data_ptr->physical_address = 0xffffffff;
1659 table_data.block_size = 0;
1661 for (i = 0; i < num_virtual_buffers; i++) {
1662 /* get the virtual buffer address */
1663 error = get_user(virt_buff_addr, &first_buff_addr);
1667 /* get the virtual buffer size */
1669 error = get_user(virt_buff_size, &first_buff_addr);
1673 /* advance the address to point to the next pair of address|size */
1676 /* now prepare the one flow LLI table from the data */
1677 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1682 /* if this is the first table - save it to return to the user
1684 *first_table_data_ptr = table_data;
1686 /* set the pointer to info entry */
1687 prev_info_entry_ptr = info_entry_ptr;
1689 /* not first table - the previous table info entry should
1691 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1693 /* set the pointer to info entry */
1694 prev_info_entry_ptr = info_entry_ptr;
1698 /* set the last table data */
1699 *last_table_data_ptr = table_data;
/*
 * sep_deallocated_flow_tables - walk the chain of flow DMA tables
 * starting at @first_table_ptr and release the pinned pages of each.
 * Relies on the table layout written by sep_prepare_one_flow_dma_table:
 * the page count and page-array pointer sit in the two words
 * immediately before the first LLI entry, and the last (info) entry
 * points at the next table, or holds 0xffffffff at the end of the chain.
 */
1705 this function goes over all the flow tables connected to the given
1706 table and deallocate them
1708 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1711 unsigned long *table_ptr;
1712 /* end address of the flow dma area */
1713 unsigned long num_entries;
1714 unsigned long num_pages;
1715 struct page **pages_ptr;
1716 /* maximum table size in words */
1717 struct sep_lli_entry_t *info_entry_ptr;
1719 /* set the pointer to the first table */
1720 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1722 /* set the num of entries */
1723 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1724 & SEP_NUM_ENTRIES_MASK;
1726 /* go over all the connected tables */
1727 while (*table_ptr != 0xffffffff) {
1728 /* get number of pages */
1729 num_pages = *(table_ptr - 2);
1731 /* get the pointer to the pages */
1732 pages_ptr = (struct page **) (*(table_ptr - 1));
1734 /* free the pages */
1735 sep_free_dma_pages(pages_ptr, num_pages, 1);
1737 /* goto to the info entry */
1738 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the info entry to the next table in the chain */
1740 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1741 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1748 * sep_find_flow_context - find a flow
1749 * @sep: the SEP we are working with
1750 * @flow_id: flow identifier
1752 * Returns a pointer the matching flow, or NULL if the flow does not
1756 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1757 unsigned long flow_id)
1761 * always search for flow with id default first - in case we
1762 * already started working on the flow there can be no situation
1763 * when 2 flows are with default flag
/* linear scan over the fixed-size flow table; first match wins */
1765 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1766 if (sep->flows[count].flow_id == flow_id)
1767 return &sep->flows[count];
/*
 * sep_create_flow_dma_tables_handler - ioctl handler that creates the
 * DMA table chain for a new flow. Claims a free flow context
 * (SEP_FREE_FLOW_ID), copies the request from user space, builds the
 * tables, returns the first table's address/entries/size to the user,
 * and tags the context with SEP_TEMP_FLOW_ID. On error the partially
 * built chain is torn down via sep_deallocated_flow_tables().
 */
1774 this function handles the request to create the DMA tables for flow
1776 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1779 int error = -ENOENT;
1780 struct sep_driver_build_flow_table_t command_args;
1781 /* first table - output */
1782 struct sep_lli_entry_t first_table_data;
1783 /* dma table data */
1784 struct sep_lli_entry_t last_table_data;
1785 /* pointer to the info entry of the previuos DMA table */
1786 struct sep_lli_entry_t *prev_info_entry_ptr;
1787 /* pointer to the flow data strucutre */
1788 struct sep_flow_context_t *flow_context_ptr;
1790 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1792 /* init variables */
1793 prev_info_entry_ptr = 0;
/* 0xffffffff means "nothing built yet" so cleanup is safe on early error */
1794 first_table_data.physical_address = 0xffffffff;
1796 /* find the free structure for flow data */
1798 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1799 if (flow_context_ptr == NULL)
1802 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1806 /* create flow tables */
1807 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1809 goto end_function_with_error;
1811 /* check if flow is static */
1812 if (!command_args.flow_type)
1813 /* point the info entry of the last to the info entry of the first */
1814 last_table_data = first_table_data;
1816 /* set output params */
1817 command_args.first_table_addr = first_table_data.physical_address;
1818 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1819 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1821 /* send the parameters to user application */
1822 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1824 goto end_function_with_error;
1826 /* all the flow created - update the flow entry with temp id */
1827 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1829 /* set the processing tables data in the context */
1830 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1831 flow_context_ptr->input_tables_in_process = first_table_data;
1833 flow_context_ptr->output_tables_in_process = first_table_data;
1837 end_function_with_error:
1838 /* free the allocated tables */
1839 sep_deallocated_flow_tables(&first_table_data);
1841 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
/*
 * sep_add_flow_tables_handler - ioctl handler that appends new DMA
 * tables to an existing flow. Builds a fresh table chain from the
 * user-supplied buffer list, then either links it to the end of the
 * flow's existing input/output chain (by overwriting the previous last
 * table's info entry) or installs it as the first chain if none exists.
 * Returns the chain head's address/entries/size to user space.
 */
1846 this function handles add tables to flow
1848 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1851 unsigned long num_entries;
1852 struct sep_driver_add_flow_table_t command_args;
1853 struct sep_flow_context_t *flow_context_ptr;
1854 /* first dma table data */
1855 struct sep_lli_entry_t first_table_data;
1856 /* last dma table data */
1857 struct sep_lli_entry_t last_table_data;
1858 /* pointer to the info entry of the current DMA table */
1859 struct sep_lli_entry_t *info_entry_ptr;
1861 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1863 /* get input parameters */
1864 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1868 /* find the flow structure for the flow id */
1869 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1870 if (flow_context_ptr == NULL)
1873 /* prepare the flow dma tables */
1874 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1876 goto end_function_with_error;
1878 /* now check if there is already an existing add table for this flow */
1879 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1880 /* this buffer was for input buffers */
1881 if (flow_context_ptr->input_tables_flag) {
1882 /* add table already exists - add the new tables to the end
/* locate the info entry of the current last input table */
1884 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1886 info_entry_ptr = (struct sep_lli_entry_t *)
1887 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1889 /* connect to list of tables */
1890 *info_entry_ptr = first_table_data;
1892 /* set the first table data */
1893 first_table_data = flow_context_ptr->first_input_table;
1895 /* set the input flag */
1896 flow_context_ptr->input_tables_flag = 1;
1898 /* set the first table data */
1899 flow_context_ptr->first_input_table = first_table_data;
1901 /* set the last table data */
1902 flow_context_ptr->last_input_table = last_table_data;
1903 } else { /* this is output tables */
1905 /* this buffer was for input buffers */
1906 if (flow_context_ptr->output_tables_flag) {
1907 /* add table already exists - add the new tables to
1908 the end of the previous */
1909 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1911 info_entry_ptr = (struct sep_lli_entry_t *)
1912 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1914 /* connect to list of tables */
1915 *info_entry_ptr = first_table_data;
1917 /* set the first table data */
1918 first_table_data = flow_context_ptr->first_output_table;
1920 /* set the input flag */
1921 flow_context_ptr->output_tables_flag = 1;
1923 /* set the first table data */
1924 flow_context_ptr->first_output_table = first_table_data;
1926 /* set the last table data */
1927 flow_context_ptr->last_output_table = last_table_data;
1930 /* set output params */
1931 command_args.first_table_addr = first_table_data.physical_address;
1932 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1933 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1935 /* send the parameters to user application */
1936 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1937 end_function_with_error:
1938 /* free the allocated tables */
1939 sep_deallocated_flow_tables(&first_table_data);
1941 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
/*
 * sep_add_flow_tables_message_handler - ioctl handler that attaches a
 * user-supplied message to a flow context. Validates the message length
 * against SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES before copying the
 * message body from user space into the flow's message buffer.
 */
1946 this function add the flow add message to the specific flow
1948 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1951 struct sep_driver_add_message_t command_args;
1952 struct sep_flow_context_t *flow_context_ptr;
1954 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1956 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject oversized messages before touching the context buffer */
1961 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1966 /* find the flow context */
1967 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1968 if (flow_context_ptr == NULL)
1971 /* copy the message into context */
1972 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1973 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1975 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
/*
 * sep_get_static_pool_addr_handler - report the bus address and the
 * kernel-virtual address of the static pool region of the shared area
 * back to user space.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_static_pool_addr_t
 */
1981 this function returns the bus and virtual addresses of the static pool
1983 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1986 struct sep_driver_static_pool_addr_t command_args;
1988 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1990 /*prepare the output parameters in the struct */
1991 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
/* NOTE(review): exposing a kernel virtual address to user space leaks
   kernel layout information; callers must treat it as opaque */
1992 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1994 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1996 /* send the parameters to user application */
1997 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1998 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
/*
 * sep_get_physical_mapped_offset_handler - translate a bus address that
 * lies inside the shared area into a byte offset from shared_bus and
 * return it to user space.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_get_mapped_offset_t
 */
2003 this address gets the offset of the physical address from the start
2006 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2009 struct sep_driver_get_mapped_offset_t command_args;
2011 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2013 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
/* reject addresses below the shared-area base */
2017 if (command_args.physical_address < sep->shared_bus) {
2022 /*prepare the output parameters in the struct */
/* NOTE(review): no upper-bound check is visible here — an address past
   the end of the shared area would still produce an offset; confirm
   against the full source */
2023 command_args.offset = command_args.physical_address - sep->shared_bus;
2025 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2027 /* send the parameters to user application */
2028 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2030 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * sep_start_handler - start-up handshake with the SEP coprocessor.
 * Polls GPR3 for a message from the SEP; on a failure indication the
 * error status is read from GPR0 and propagated as the return value.
 */
2038 static int sep_start_handler(struct sep_device *sep)
2040 unsigned long reg_val;
2041 unsigned long error = 0;
2043 dbg("SEP Driver:--------> sep_start_handler start\n");
2045 /* wait in polling for message from SEP */
2047 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2050 /* check the value */
2052 /* fatal error - read error status from GPRO */
2053 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2054 dbg("SEP Driver:<-------- sep_start_handler end\n");
/*
 * sep_init_handler - download the user-supplied init message into the
 * SEP's SRAM word by word, signal the SEP via GPR0, then poll GPR3 for
 * the init acknowledge.  A reply value of 0x1 means init failed; the
 * SW monitor (0x8060) and GPR0 error status are then read and logged.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_init_t
 */
2059 this function handles the request for SEP initialization
2061 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2063 unsigned long message_word;
2064 unsigned long *message_ptr;
2065 struct sep_driver_init_t command_args;
2066 unsigned long counter;
2067 unsigned long error;
2068 unsigned long reg_val;
2070 dbg("SEP Driver:--------> sep_init_handler start\n");
2073 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2075 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2080 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2081 /*sep_configure_dma_burst(); */
2083 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
/* message_addr is a user-space address used directly as a pointer;
   each word is fetched with get_user() below */
2085 message_ptr = (unsigned long *) command_args.message_addr;
2087 /* set the base address of the SRAM */
2088 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2090 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
/* NOTE(review): get_user()'s return value is ignored — a fault would
   silently write a stale message_word to SRAM */
2091 get_user(message_word, message_ptr);
2092 /* write data to SRAM */
2093 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2094 edbg("SEP Driver:message_word is %lu\n", message_word);
2095 /* wait for write complete */
2096 sep_wait_sram_write(sep);
2098 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* kick the SEP: tell it the init message is in SRAM */
2100 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2103 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
/* NOTE(review): reg_val is read once above; if the condition holds
   this empty-body while spins forever without re-reading the register
   or relaxing the CPU — confirm against the full source */
2104 while (!(reg_val & 0xFFFFFFFD));
2106 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2108 /* check the value */
2109 if (reg_val == 0x1) {
2110 edbg("SEP Driver:init failed\n");
/* 0x8060 is the SEP software-monitor register (magic offset) */
2112 error = sep_read_reg(sep, 0x8060);
2113 edbg("SEP Driver:sw monitor is %lu\n", error);
2115 /* fatal error - read erro status from GPRO */
2116 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2117 edbg("SEP Driver:error is %lu\n", error);
2120 dbg("SEP Driver:<-------- sep_init_handler end\n");
/*
 * sep_realloc_cache_resident_handler - (re)load the SEP firmware
 * (cache + resident images) and report the resulting bus addresses
 * back to user space.  new_base_addr is the lowest of the shared-area,
 * resident and RAR bus addresses.
 * @sep: device instance
 */
2126 this function handles the request cache and resident reallocation
2128 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2131 struct sep_driver_realloc_cache_resident_t command_args;
2134 /* copy cache and resident to the their intended locations */
2135 error = sep_load_firmware(sep);
2139 command_args.new_base_addr = sep->shared_bus;
2141 /* find the new base address according to the lowest address between
2142 cache, resident and shared area */
2143 if (sep->resident_bus < command_args.new_base_addr)
2144 command_args.new_base_addr = sep->resident_bus;
2145 if (sep->rar_bus < command_args.new_base_addr)
2146 command_args.new_base_addr = sep->rar_bus;
2148 /* set the return parameters */
2149 command_args.new_cache_addr = sep->rar_bus;
2150 command_args.new_resident_addr = sep->resident_bus;
2152 /* set the new shared area */
2153 command_args.new_shared_area_addr = sep->shared_bus;
2155 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2156 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2157 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2158 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2160 /* return to user */
2161 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2167 * sep_get_time_handler - time request from user space
2168 * @sep: sep we are to set the time for
2169 * @arg: pointer to user space arg buffer
2171 * This function reports back the time and the address in the SEP
2172 * shared buffer at which it has been placed. (Do we really need this!!!)
2175 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2177 struct sep_driver_get_time_t command_args;
/* sep_set_time() both stamps the shared area and returns the value;
   serialize against other shared-area users with the global mutex */
2179 mutex_lock(&sep_mutex);
2180 command_args.time_value = sep_set_time(sep);
2181 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2182 mutex_unlock(&sep_mutex);
2183 if (copy_to_user((void __user *)arg,
2184 &command_args, sizeof(struct sep_driver_get_time_t)))
/*
 * sep_end_transaction_handler - finish a user transaction with the SEP.
 * The interrupt-mode teardown (mask IMR, free the IRQ) is compiled out
 * with "#if 0"; the active work is dropping the transaction mutex.
 */
2191 This API handles the end transaction request
2193 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2195 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2197 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2199 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2201 /* release IRQ line */
2202 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2204 /* release the sep mutex taken when the transaction began */
2205 mutex_unlock(&sep_mutex);
2208 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2215 * sep_set_flow_id_handler - handle flow setting
2216 * @sep: the SEP we are configuring
2217 * @flow_id: the flow we are setting
2219 * This function handler the set flow id command
/*
 * Looks up the flow context created with the temporary id
 * SEP_TEMP_FLOW_ID and rebinds it to the caller-supplied flow_id,
 * under the global sep_mutex.
 */
2221 static int sep_set_flow_id_handler(struct sep_device *sep,
2222 unsigned long flow_id)
2225 struct sep_flow_context_t *flow_data_ptr;
2227 /* find the flow data structure that was just used for creating new flow
2228 - its id should be default */
2230 mutex_lock(&sep_mutex);
2231 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
/* NOTE(review): a NULL check on flow_data_ptr is not visible here —
   confirm against the full source before relying on this path */
2233 flow_data_ptr->flow_id = flow_id; /* set flow id */
2236 mutex_unlock(&sep_mutex);
/*
 * sep_ioctl - single entry point for all SEP character-device ioctls.
 * Dispatches on the SEP_IOC* command code to the matching handler;
 * each handler decodes @arg itself (most treat it as a user pointer
 * to its command struct, SEP_IOCSETFLOWID passes it by value).
 */
2240 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2243 struct sep_device *sep = filp->private_data;
2245 dbg("------------>SEP Driver: ioctl start\n");
2247 edbg("SEP Driver: cmd is %x\n", cmd);
2250 case SEP_IOCSENDSEPCOMMAND:
2251 /* send command to SEP */
2252 sep_send_command_handler(sep);
2253 edbg("SEP Driver: after sep_send_command_handler\n");
2255 case SEP_IOCSENDSEPRPLYCOMMAND:
2256 /* send reply command to SEP */
2257 sep_send_reply_command_handler(sep);
2259 case SEP_IOCALLOCDATAPOLL:
2260 /* allocate data pool */
2261 error = sep_allocate_data_pool_memory_handler(sep, arg);
2263 case SEP_IOCWRITEDATAPOLL:
2264 /* write data into memory pool */
2265 error = sep_write_into_data_pool_handler(sep, arg);
2267 case SEP_IOCREADDATAPOLL:
2268 /* read data from data pool into application memory */
2269 error = sep_read_from_data_pool_handler(sep, arg);
2271 case SEP_IOCCREATESYMDMATABLE:
2272 /* create dma table for synhronic operation */
2273 error = sep_create_sync_dma_tables_handler(sep, arg);
2275 case SEP_IOCCREATEFLOWDMATABLE:
2276 /* create flow dma tables */
2277 error = sep_create_flow_dma_tables_handler(sep, arg);
2279 case SEP_IOCFREEDMATABLEDATA:
2280 /* free the pages */
2281 error = sep_free_dma_table_data_handler(sep);
2283 case SEP_IOCSETFLOWID:
/* arg is the flow id itself, not a user pointer */
2285 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2287 case SEP_IOCADDFLOWTABLE:
2288 /* add tables to the dynamic flow */
2289 error = sep_add_flow_tables_handler(sep, arg);
2291 case SEP_IOCADDFLOWMESSAGE:
2292 /* add message of add tables to flow */
2293 error = sep_add_flow_tables_message_handler(sep, arg);
2295 case SEP_IOCSEPSTART:
2296 /* start command to sep */
2297 error = sep_start_handler(sep);
2299 case SEP_IOCSEPINIT:
2300 /* init command to sep */
2301 error = sep_init_handler(sep, arg);
2303 case SEP_IOCGETSTATICPOOLADDR:
2304 /* get the physical and virtual addresses of the static pool */
2305 error = sep_get_static_pool_addr_handler(sep, arg);
2307 case SEP_IOCENDTRANSACTION:
2308 error = sep_end_transaction_handler(sep, arg);
2310 case SEP_IOCREALLOCCACHERES:
2311 error = sep_realloc_cache_resident_handler(sep, arg);
2313 case SEP_IOCGETMAPPEDADDROFFSET:
2314 error = sep_get_physical_mapped_offset_handler(sep, arg);
2317 error = sep_get_time_handler(sep, arg);
2323 dbg("SEP Driver:<-------- ioctl end\n");
2329 #if !SEP_DRIVER_POLLING_MODE
2331 /* handler for flow done interrupt */
/*
 * Workqueue callback run when a flow-done interrupt is queued: frees
 * the flow's completed input (and, if present, output) DMA tables and,
 * if more input tables are pending, pushes the flow message to the
 * shared area and rings the SEP via GPR2.
 */
2333 static void sep_flow_done_handler(struct work_struct *work)
2335 struct sep_flow_context_t *flow_data_ptr;
2337 /* obtain the mutex */
2338 mutex_lock(&sep_mutex);
2340 /* get the pointer to context */
/* NOTE(review): casting the work_struct pointer straight to the flow
   context assumes flow_wq is the first member of
   struct sep_flow_context_t — container_of() would be safer; confirm */
2341 flow_data_ptr = (struct sep_flow_context_t *) work;
2343 /* free all the current input tables in sep */
2344 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2346 /* free all the current tables output tables in SEP (if needed) */
2347 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2348 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2350 /* check if we have additional tables to be sent to SEP only input
2351 flag may be checked */
2352 if (flow_data_ptr->input_tables_flag) {
2353 /* copy the message to the shared RAM and signal SEP */
/* NOTE(review): argument order copies FROM sep->shared_addr INTO
   flow_data_ptr->message, the opposite of the comment above — verify
   intended direction against the full source */
2354 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
2356 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2358 mutex_unlock(&sep_mutex);
2361 interrupt handler function
/*
 * sep_inthandler - shared IRQ handler for the SEP device.
 * Reads IRR to classify the interrupt: bit 13 is a reply from the SEP
 * (wakes sep_event waiters); the flow-done branch (bit 11) is hard
 * disabled below with "if (0 ...)".  The handled bits are acknowledged
 * by writing reg_val back to ICR.
 */
2363 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2365 irqreturn_t int_error;
2366 unsigned long reg_val;
2367 unsigned long flow_id;
2368 struct sep_flow_context_t *flow_context_ptr;
2369 struct sep_device *sep = dev_id;
2371 int_error = IRQ_HANDLED;
2373 /* read the IRR register to check if this is SEP interrupt */
2374 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2375 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2377 /* check if this is the flow interrupt */
/* NOTE(review): flow-done handling is deliberately compiled out via
   the constant-false condition; the real test is in the comment */
2378 if (0 /*reg_val & (0x1 << 11) */ ) {
2379 /* read GPRO to find out the which flow is done */
/* NOTE(review): this reads IRR again, not a GPR as the comment says */
2380 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2382 /* find the contex of the flow */
2383 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2384 if (flow_context_ptr == NULL)
2385 goto end_function_with_error;
2387 /* queue the work */
2388 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2389 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2392 /* check if this is reply interrupt from SEP */
2393 if (reg_val & (0x1 << 13)) {
2394 /* update the counter of reply messages */
2396 /* wake up the waiting process */
2397 wake_up(&sep_event);
/* not ours: let other handlers on the shared line see it */
2399 int_error = IRQ_NONE;
2403 end_function_with_error:
2404 /* clear the interrupt */
2405 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
/* Poll the SEP busy register until the device reports idle
   (loop body/condition not shown at this point in the file). */
2416 static void sep_wait_busy(struct sep_device *sep)
2421 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2426 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * sep_configure_dma_burst - workaround: request register access from
 * the SEP via GPR0, force the AHB read/write burst register to single
 * burst, then release the SEP busy indication.
 */
2428 static void sep_configure_dma_burst(struct sep_device *sep)
/* AHB burst-control register offset, local to this workaround */
2430 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2432 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2434 /* request access to registers from SEP */
2435 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2437 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2441 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2443 /* set the DMA burst register to single burst */
2444 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2446 /* release the sep busy */
2447 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2450 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2457 Function that is activated on the successful probe of the SEP device
/*
 * sep_probe - PCI probe: enables the device, allocates and maps the
 * shared area, creates the flow workqueue, maps BAR0 registers, loads
 * the ROM code, allocates the RAR DMA region and (in interrupt mode)
 * installs the IRQ handler.  Only a single SEP instance is supported
 * (guarded by the global sep_dev / sep_instance).
 */
2459 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2462 struct sep_device *sep;
2464 int size; /* size of memory for allocation */
2466 edbg("Sep pci probe starting\n");
2467 if (sep_dev != NULL) {
2468 dev_warn(&pdev->dev, "only one SEP supported.\n");
2472 /* enable the device */
2473 error = pci_enable_device(pdev);
2475 edbg("error enabling pci device\n");
2479 /* set the pci dev pointer */
2480 sep_dev = &sep_instance;
2481 sep = &sep_instance;
2483 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2484 /* transaction counter that coordinates the transactions between SEP
2487 /* counter for the messages from sep */
2489 /* counter for the number of bytes allocated in the pool
2490 for the current transaction */
2491 sep->data_pool_bytes_allocated = 0;
2493 /* calculate the total size for allocation */
2494 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2495 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2497 /* allocate the shared area */
2498 if (sep_map_and_alloc_shared_area(sep, size)) {
2500 /* allocation failed */
2501 goto end_function_error;
2503 /* now set the memory regions */
2504 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2505 /* Note: this test section will need moving before it could ever
2506 work as the registers are not yet mapped ! */
2507 /* send the new SHARED MESSAGE AREA to the SEP */
2508 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2510 /* poll for SEP response */
2511 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2512 while (retval != 0xffffffff && retval != sep->shared_bus)
2513 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2515 /* check the return value (register) */
2516 if (retval != sep->shared_bus) {
2518 goto end_function_deallocate_sep_shared_area;
2521 /* init the flow contextes */
/* mark every flow slot free before any ioctl can claim one */
2522 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2523 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2525 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2526 if (sep->flow_wq == NULL) {
2528 edbg("sep_driver:flow queue creation failed\n");
2529 goto end_function_deallocate_sep_shared_area;
2531 edbg("SEP Driver: create flow workqueue \n");
/* take a reference on the pci device for the driver's lifetime */
2532 sep->pdev = pci_dev_get(pdev);
2534 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2535 if (!sep->reg_addr) {
2536 edbg("sep: ioremap of registers failed.\n");
2537 goto end_function_deallocate_sep_shared_area;
2539 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2541 /* load the rom code */
2542 sep_load_rom_code(sep);
2544 /* set up system base address and shared memory location */
/* double-sized RAR allocation: one region each for cache and resident */
2545 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2546 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2547 &sep->rar_bus, GFP_KERNEL);
2549 if (!sep->rar_addr) {
2550 edbg("SEP Driver:can't allocate rar\n");
2551 goto end_function_uniomap;
2555 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2556 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2558 #if !SEP_DRIVER_POLLING_MODE
2560 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2562 /* clear ICR register */
2563 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2565 /* set the IMR register - open only GPR 2 */
2566 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2568 edbg("SEP Driver: about to call request_irq\n");
2569 /* get the interrupt line */
/* IRQF_SHARED: the handler must tolerate interrupts from other devices */
2570 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2572 goto end_function_free_res;
2574 edbg("SEP Driver: about to write IMR REG_ADDR");
2576 /* set the IMR register - open only GPR 2 */
2577 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* error unwind: each label undoes one earlier acquisition, in reverse
   order (NOTE(review): the success-path return before these labels is
   not shown here — confirm it exists in the full source) */
2579 end_function_free_res:
2580 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2581 sep->rar_addr, sep->rar_bus);
2582 #endif /* SEP_DRIVER_POLLING_MODE */
2583 end_function_uniomap:
2584 iounmap(sep->reg_addr);
2585 end_function_deallocate_sep_shared_area:
2586 /* de-allocate shared area */
2587 sep_unmap_and_free_shared_area(sep, size);
/* PCI IDs this driver binds to: Intel SEP security processor (0x080c) */
2594 static const struct pci_device_id sep_pci_id_tbl[] = {
2595 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2599 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2601 /* field for registering driver to PCI device */
2602 static struct pci_driver sep_pci_driver = {
2603 .name = "sep_sec_driver",
2604 .id_table = sep_pci_id_tbl,
2606 /* FIXME: remove handler */
2609 /* major and minor device numbers */
2610 static dev_t sep_devno;
2612 /* the files operations structure of the driver */
2613 static struct file_operations sep_file_operations = {
2614 .owner = THIS_MODULE,
2615 .unlocked_ioctl = sep_ioctl,
2618 .release = sep_release,
2623 /* cdev struct of the driver */
2624 static struct cdev sep_cdev;
/*
 * sep_register_driver_to_fs - allocate a char-device number and add
 * the sep cdev so user space can open the device node.  On cdev_add
 * failure the allocated chrdev region is released again.
 */
2627 this function registers the driver to the file system
2629 static int sep_register_driver_to_fs(void)
2631 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2633 edbg("sep: major number allocation failed, retval is %d\n",
2638 cdev_init(&sep_cdev, &sep_file_operations);
2639 sep_cdev.owner = THIS_MODULE;
2641 /* register the driver with the kernel */
2642 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2644 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2645 /* unregister dev numbers */
2646 unregister_chrdev_region(sep_devno, 1);
2652 /*--------------------------------------------------------------
/* Module init: register the PCI driver first, then the char device. */
2654 ----------------------------------------------------------------*/
2655 static int __init sep_init(void)
2658 dbg("SEP Driver:-------->Init start\n");
2659 /* FIXME: Probe can occur before we are ready to survive a probe */
2660 ret_val = pci_register_driver(&sep_pci_driver);
2662 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
/* NOTE(review): this jump runs cdev_del()/unregister_chrdev_region()
   on a cdev that was never added — the two error labels look swapped;
   confirm against the full source */
2663 goto end_function_unregister_from_fs;
2665 /* register driver to fs */
2666 ret_val = sep_register_driver_to_fs();
2668 goto end_function_unregister_pci;
2670 end_function_unregister_pci:
2671 pci_unregister_driver(&sep_pci_driver);
2672 end_function_unregister_from_fs:
2673 /* unregister from fs */
2674 cdev_del(&sep_cdev);
2675 /* unregister dev numbers */
2676 unregister_chrdev_region(sep_devno, 1);
2678 dbg("SEP Driver:<-------- Init end\n");
2683 /*-------------------------------------------------------------
/* Module exit: tear down the char device, then free the shared area
   and unmap the registers of the (single) global device instance. */
2685 --------------------------------------------------------------*/
2686 static void __exit sep_exit(void)
2690 dbg("SEP Driver:--------> Exit start\n");
2692 /* unregister from fs */
2693 cdev_del(&sep_cdev);
2694 /* unregister dev numbers */
2695 unregister_chrdev_region(sep_devno, 1);
2696 /* calculate the total size for de-allocation */
/* must match the size computed in sep_probe exactly */
2697 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2698 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2699 /* FIXME: We need to do this in the unload for the device */
2700 /* free shared area */
/* NOTE(review): sep_dev may be NULL if probe never ran — a guard is
   not visible here; confirm against the full source */
2702 sep_unmap_and_free_shared_area(sep_dev, size);
2703 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2704 iounmap((void *) sep_dev->reg_addr);
2705 edbg("SEP Driver: iounmap \n");
2707 edbg("SEP Driver: release_mem_region \n");
2708 dbg("SEP Driver:<-------- Exit end\n");
/* standard module entry/exit hookup and license declaration */
2712 module_init(sep_init);
2713 module_exit(sep_exit);
2715 MODULE_LICENSE("GPL");