3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <asm/ioctl.h>
42 #include <linux/ioport.h>
44 #include <linux/interrupt.h>
45 #include <linux/pagemap.h>
46 #include <asm/cacheflush.h>
47 #include "sep_driver_hw_defs.h"
48 #include "sep_driver_config.h"
49 #include "sep_driver_api.h"
50 #include "sep_driver_ext_api.h"
53 /*----------------------------------------
55 -----------------------------------------*/
58 #define INT_MODULE_PARM(n, v) int n = v; module_param(n, int, 0)
60 /*--------------------------------------
62 -----------------------------------------*/
66 /*--------------------------------------------
68 --------------------------------------------*/
70 /* debug messages level */
71 INT_MODULE_PARM(sepDebug, 0x0);
72 MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
74 /* major and minor device numbers */
75 static dev_t g_sep_device_number;
77 /* the files operations structure of the driver */
78 static struct file_operations g_sep_fops;
80 /* cdev struct of the driver */
81 static struct cdev g_sep_cdev;
84 mutex for the access to the internals of the sep driver
86 static DEFINE_MUTEX(sep_mutex);
89 /* wait queue head (event) of the driver */
90 static DECLARE_WAIT_QUEUE_HEAD(g_sep_event);
94 /*------------------------------------------------
96 ---------------------------------------------------*/
99 interrupt handler function
101 irqreturn_t sep_inthandler(int irq, void *dev_id);
104 this function registers the driver to the file system
106 static int sep_register_driver_to_fs(void);
109 this function unregisters driver from fs
111 static void sep_unregister_driver_from_fs(void);
114 this function calculates the size of data that can be inserted into the lli
115 table from this array the condition is that either the table is full
116 (all entries are entered), or there are no more entries in the lli array
118 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries);
120 this function builds one lli table from the lli_array according to the
123 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size);
126 this function goes over the list of the print created tables and prints
129 static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size);
134 This function raises an interrupt to SEP that signals that it has a new
137 static void sep_send_command_handler(void);
141 This function raises an interrupt to SEP that signals that it has a
144 static void sep_send_reply_command_handler(void);
147 This function handles the allocate data pool memory request
148 This function returns calculates the physical address of the allocated memory
149 and the offset of this area from the mapped address. Therefore, the FVOs in
150 user space can calculate the exact virtual address of this allocated memory
152 static int sep_allocate_data_pool_memory_handler(unsigned long arg);
156 This function handles write into allocated data pool command
158 static int sep_write_into_data_pool_handler(unsigned long arg);
161 this function handles the read from data pool command
163 static int sep_read_from_data_pool_handler(unsigned long arg);
166 this function handles the request for creation of the DMA table
167 for the synchronic symmetric operations (AES,DES)
169 static int sep_create_sync_dma_tables_handler(unsigned long arg);
172 this function handles the request to create the DMA tables for flow
174 static int sep_create_flow_dma_tables_handler(unsigned long arg);
177 This API handles the end transaction request
179 static int sep_end_transaction_handler(unsigned long arg);
183 this function handles add tables to flow
185 static int sep_add_flow_tables_handler(unsigned long arg);
188 this function add the flow add message to the specific flow
190 static int sep_add_flow_tables_message_handler(unsigned long arg);
193 this function handles the request for SEP start
195 static int sep_start_handler(void);
198 this function handles the request for SEP initialization
200 static int sep_init_handler(unsigned long arg);
203 this function handles the request cache and resident reallocation
205 static int sep_realloc_cache_resident_handler(unsigned long arg);
209 This api handles the setting of API mode to blocking or non-blocking
211 static int sep_set_api_mode_handler(unsigned long arg);
213 /* handler for flow done interrupt */
214 static void sep_flow_done_handler(struct work_struct *work);
217 This function locks all the physical pages of the kernel virtual buffer
218 and construct a basic lli array, where each entry holds the physical
219 page address and the size that application data holds in this physical pages
221 static int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
224 This function creates one DMA table for flow and returns its data,
225 and pointer to its info entry
227 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress);
230 This function creates a list of tables for flow and returns the data for the
231 first and last tables of the list
233 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
234 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress);
237 this function find a space for the new flow dma table
239 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr);
242 this function goes over all the flow tables connected to the given table and
245 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr);
248 This function handler the set flow id command
250 static int sep_set_flow_id_handler(unsigned long arg);
253 This function returns pointer to the flow data structure
254 that contains the given id
256 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr);
260 this function returns the physical and virtual addresses of the static pool
262 static int sep_get_static_pool_addr_handler(unsigned long arg);
265 this function gets the offset of the physical address from the start of
268 static int sep_get_physical_mapped_offset_handler(unsigned long arg);
272 this function handles the request for get time
274 static int sep_get_time_handler(unsigned long arg);
277 calculates time and sets it at the predefined address
279 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr);
282 PATCH for configuring the DMA to single burst instead of multi-burst
284 static void sep_configure_dma_burst(void);
287 This function locks all the physical pages of the
288 application virtual buffer and construct a basic lli
289 array, where each entry holds the physical page address
290 and the size that application data holds in this physical pages
292 static int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
294 /*---------------------------------------------
296 -----------------------------------------------*/
299 this function locks SEP by locking the semaphore
/* serialize all access to the SEP device through the driver-wide sep_mutex */
303 mutex_lock(&sep_mutex);
309 this function unlocks SEP
/* release the driver-wide sep_mutex taken by the lock function above */
314 mutex_unlock(&sep_mutex);
318 this function returns the address of the message shared area
320 void sep_map_shared_area(unsigned long *mappedAddr_ptr)
/* hand the driver's shared-area virtual address back through the caller's pointer */
322 *mappedAddr_ptr = sep_dev->shared_area_addr;
326 this function signals SEP that a new message is ready in the shared area
328 void sep_send_msg_rdy_cmd()
/* delegate to sep_send_command_handler(), which raises the interrupt to SEP */
330 sep_send_command_handler();
333 /* this function frees all the resources that were allocated for the building
334 of the LLI DMA tables */
335 void sep_free_dma_resources()
/* all teardown work is done by the DMA-table data handler */
337 sep_free_dma_table_data_handler();
340 /* poll(suspend), until reply from sep */
341 void sep_driver_poll()
343 unsigned long retVal = 0;
345 #ifdef SEP_DRIVER_POLLING_MODE
/* polling build: busy-read GPR2 until SEP's reply count (low 31 bits)
   catches up with the number of messages the host has sent */
347 while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF))
348 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
350 sep_dev->sep_to_host_reply_counter++;
352 /* poll, until reply from sep */
/* non-polling path (presumably under an #else branch — confirm): sleep on the
   driver event queue until the reply counter matches the send counter */
353 wait_event(g_sep_event, (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter));
358 /*----------------------------------------------------------------------
359 open function of the character driver - must only lock the mutex
360 must also release the memory data pool allocations
361 ------------------------------------------------------------------------*/
362 static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
369 ---------------------*/
371 dbg("SEP Driver:--------> open start\n");
375 /* check the blocking mode */
376 if (sep_dev->block_mode_flag)
/* blocking mode: sleep until the device mutex is available */
378 mutex_lock(&sep_mutex);
/* non-blocking mode: try once; error path logs the failure below */
380 error = mutex_trylock(&sep_mutex);
382 /* check the error */
384 edbg("SEP Driver: down_interruptible failed\n");
389 /* release data pool allocations */
/* a fresh open starts with an empty data pool for the new transaction */
390 sep_dev->data_pool_bytes_allocated = 0;
394 dbg("SEP Driver:<-------- open end\n");
402 /*------------------------------------------------------------
404 -------------------------------------------------------------*/
405 static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
409 ---------------------*/
411 dbg("----------->SEP Driver: sep_release start\n");
413 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* disabled block: mask all SEP interrupts and give back the IRQ line */
415 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
417 /* release IRQ line */
/* NOTE(review): "SEP_DIRVER_IRQ_NUM" looks misspelled ("DIRVER") — verify it
   matches the define in the driver headers before correcting */
418 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);
422 /* unlock the sep mutex */
/* drop the device mutex taken in sep_open() */
423 mutex_unlock(&sep_mutex);
425 dbg("SEP Driver:<-------- sep_release end\n");
433 /*---------------------------------------------------------------
434 map function - this functions maps the message shared area
435 -----------------------------------------------------------------*/
436 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
439 unsigned long phys_addr;
441 /*-----------------------
443 -------------------------*/
445 dbg("-------->SEP Driver: mmap start\n");
447 /* check that the size of the mapped range is as the size of the message
/* reject mappings larger than the fixed shared-area window */
449 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
450 edbg("SEP Driver mmap requested size is more than allowed\n");
451 printk(KERN_WARNING "SEP Driver mmap requested size is more \
453 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
/* NOTE(review): message says vm_end but the value printed is vm_start */
454 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
458 edbg("SEP Driver:g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
460 /* get physical address */
461 phys_addr = sep_dev->phys_shared_area_addr;
463 edbg("SEP Driver: phys_addr is %08lx\n", phys_addr);
/* map the shared area's physical pages into the caller's VMA */
465 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
466 edbg("SEP Driver remap_page_range failed\n");
467 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
471 dbg("SEP Driver:<-------- mmap end\n");
477 /*-----------------------------------------------
479 *----------------------------------------------*/
/* poll entry point: reports POLLIN when a SEP reply is pending, POLLOUT when
   SEP issued a request; in polling builds it busy-waits on GPR2 instead of
   relying on the interrupt handler */
480 static unsigned int sep_poll(struct file *filp, poll_table * wait)
484 unsigned int mask = 0;
487 unsigned long retVal = 0;
489 /*----------------------------------------------
491 -------------------------------------------------*/
493 dbg("---------->SEP Driver poll: start\n");
496 #if SEP_DRIVER_POLLING_MODE
/* busy-poll GPR2 until SEP's reply count matches the host send counter */
498 while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF)) {
499 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
501 for (count = 0; count < 10 * 4; count += 4)
502 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
505 sep_dev->sep_to_host_reply_counter++;
507 /* add the event to the polling wait table */
508 poll_wait(filp, &g_sep_event, wait);
512 edbg("sep_dev->host_to_sep_send_counter is %lu\n", sep_dev->host_to_sep_send_counter);
513 edbg("sep_dev->sep_to_host_reply_counter is %lu\n", sep_dev->sep_to_host_reply_counter);
515 /* check if the data is ready */
516 if (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter) {
517 for (count = 0; count < 12 * 4; count += 4)
518 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
520 for (count = 0; count < 10 * 4; count += 4)
521 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + 0x1800 + count)));
/* GPR2 distinguishes a SEP-originated request from a reply to the host */
523 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
524 edbg("retVal is %lu\n", retVal);
525 /* check if this is a sep reply or request */
527 edbg("SEP Driver: sep request in\n");
529 mask |= POLLOUT | POLLWRNORM;
531 edbg("SEP Driver: sep reply in\n");
532 mask |= POLLIN | POLLRDNORM;
536 dbg("SEP Driver:<-------- poll exit\n");
/* ioctl dispatcher: validates the SEP magic number, then routes each
   SEP_IOC* command to its dedicated handler; `error` carries the result */
541 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
547 /*------------------------
549 ------------------------*/
552 dbg("------------>SEP Driver: ioctl start\n");
554 edbg("SEP Driver: cmd is %x\n", cmd);
556 /* check that the command is for sep device */
557 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
561 case SEP_IOCSENDSEPCOMMAND:
563 /* send command to SEP */
564 sep_send_command_handler();
566 edbg("SEP Driver: after sep_send_command_handler\n");
570 case SEP_IOCSENDSEPRPLYCOMMAND:
572 /* send reply command to SEP */
573 sep_send_reply_command_handler();
577 case SEP_IOCALLOCDATAPOLL:
579 /* allocate data pool */
580 error = sep_allocate_data_pool_memory_handler(arg);
584 case SEP_IOCWRITEDATAPOLL:
586 /* write data into memory pool */
587 error = sep_write_into_data_pool_handler(arg);
591 case SEP_IOCREADDATAPOLL:
593 /* read data from data pool into application memory */
594 error = sep_read_from_data_pool_handler(arg);
598 case SEP_IOCCREATESYMDMATABLE:
600 /* create dma table for synhronic operation */
601 error = sep_create_sync_dma_tables_handler(arg);
605 case SEP_IOCCREATEFLOWDMATABLE:
607 /* create flow dma tables */
608 error = sep_create_flow_dma_tables_handler(arg);
612 case SEP_IOCFREEDMATABLEDATA:
/* free the data of the previously built DMA tables */
615 error = sep_free_dma_table_data_handler();
619 case SEP_IOCSETFLOWID:
/* assign an id to a flow context */
622 error = sep_set_flow_id_handler(arg);
626 case SEP_IOCADDFLOWTABLE:
628 /* add tables to the dynamic flow */
629 error = sep_add_flow_tables_handler(arg);
633 case SEP_IOCADDFLOWMESSAGE:
635 /* add message of add tables to flow */
636 error = sep_add_flow_tables_message_handler(arg);
640 case SEP_IOCSEPSTART:
642 /* start command to sep */
643 error = sep_start_handler();
648 /* init command to sep */
649 error = sep_init_handler(arg);
652 case SEP_IOCSETAPIMODE:
654 /* set non- blocking mode */
655 error = sep_set_api_mode_handler(arg);
659 case SEP_IOCGETSTATICPOOLADDR:
661 /* get the physical and virtual addresses of the static pool */
662 error = sep_get_static_pool_addr_handler(arg);
666 case SEP_IOCENDTRANSACTION:
/* end the current transaction */
668 error = sep_end_transaction_handler(arg);
672 case SEP_IOCREALLOCCACHERES:
/* reallocate the cache/resident areas */
674 error = sep_realloc_cache_resident_handler(arg);
678 case SEP_IOCGETMAPPEDADDROFFSET:
/* report the offset of the physical address within the mapped area */
680 error = sep_get_physical_mapped_offset_handler(arg);
/* get-time request */
685 error = sep_get_time_handler(arg);
694 dbg("SEP Driver:<-------- ioctl end\n");
701 this function registers the driver to the file system
703 static int sep_register_driver_to_fs(void)
708 /*---------------------
710 -----------------------*/
/* dynamically allocate one char-device number for "sep_sec_driver" */
712 ret_val = alloc_chrdev_region(&g_sep_device_number, 0, 1, "sep_sec_driver");
714 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
718 /* set the files operations structure */
719 g_sep_fops.owner = THIS_MODULE;
720 g_sep_fops.ioctl = sep_ioctl;
721 g_sep_fops.poll = sep_poll;
722 g_sep_fops.open = sep_open;
723 g_sep_fops.release = sep_release;
724 g_sep_fops.mmap = sep_mmap;
/* bind the fops to the cdev before adding it to the system */
727 cdev_init(&g_sep_cdev, &g_sep_fops);
728 g_sep_cdev.owner = THIS_MODULE;
730 /* register the driver with the kernel */
731 ret_val = cdev_add(&g_sep_cdev, g_sep_device_number, 1);
734 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
735 goto end_function_unregister_devnum;
740 end_function_unregister_devnum:
742 /* unregister dev numbers */
743 unregister_chrdev_region(g_sep_device_number, 1);
751 this function unregisters driver from fs
753 static void sep_unregister_driver_from_fs(void)
755 /*-------------------
757 ---------------------*/
/* remove the char device, then give back the reserved device numbers */
759 cdev_del(&g_sep_cdev);
761 /* unregister dev numbers */
762 unregister_chrdev_region(g_sep_device_number, 1);
765 /*--------------------------------------------------------------
767 ----------------------------------------------------------------*/
/* module init: reset driver state, register with the device, allocate and
   (optionally) reconfigure the shared area, set up flow contexts and the
   flow workqueue, then register with the filesystem; error paths unwind in
   reverse via the labels at the bottom */
768 static int __init sep_init(void)
776 /* size to of memory for allocation */
779 /*------------------------
781 ------------------------*/
783 dbg("SEP Driver:-------->Init start\n");
784 edbg("sep->shared_area_addr = %lx\n", (unsigned long) &sep_dev->shared_area_addr);
788 /* transaction counter that coordinates the transactions between SEP
790 sep_dev->host_to_sep_send_counter = 0;
792 /* counter for the messages from sep */
793 sep_dev->sep_to_host_reply_counter = 0;
795 /* counter for the number of bytes allocated in the pool
796 for the current transaction */
797 sep_dev->data_pool_bytes_allocated = 0;
799 /* set the starting mode to blocking */
800 sep_dev->block_mode_flag = 1;
803 ret_val = sep_register_driver_to_device();
805 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
/* NOTE(review): this error path jumps to the fs-unregister label even though
   sep_register_driver_to_fs() has not run yet at this point — verify the
   intended unwind target */
806 goto end_function_unregister_from_fs;
809 /* calculate the total size for allocation */
810 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
811 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
815 /* allocate the shared area */
816 if (sep_map_and_alloc_shared_area(size, &sep_dev->shared_area_addr, &sep_dev->phys_shared_area_addr)) {
818 /* allocation failed */
819 goto end_function_unmap_io_memory;
822 /* now set the memory regions */
823 sep_dev->message_shared_area_addr = sep_dev->shared_area_addr;
825 edbg("SEP Driver: g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
827 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
829 /* send the new SHARED MESSAGE AREA to the SEP */
830 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep_dev->phys_shared_area_addr);
832 /* poll for SEP response */
/* busy-wait until SEP echoes the new address (or reports 0xffffffff) */
833 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
834 while (retVal != 0xffffffff && retVal != sep_dev->phys_shared_area_addr)
835 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
837 /* check the return value (register) */
838 if (retVal != sep_dev->phys_shared_area_addr) {
840 goto end_function_deallocate_message_area;
844 /* init the flow contexts */
845 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
846 sep_dev->flows_data_array[counter].flow_id = SEP_FREE_FLOW_ID;
848 sep_dev->flow_wq_ptr = create_singlethread_workqueue("sepflowwq");
849 if (sep_dev->flow_wq_ptr == 0) {
851 edbg("sep_driver:flow queue creation failed\n");
852 goto end_function_deallocate_sep_shared_area;
855 edbg("SEP Driver: create flow workqueue \n");
857 /* register driver to fs */
858 ret_val = sep_register_driver_to_fs();
860 goto end_function_deallocate_sep_shared_area;
862 /* load the rom code */
867 end_function_unregister_from_fs:
869 /* unregister from fs */
870 sep_unregister_driver_from_fs();
872 end_function_deallocate_sep_shared_area:
874 /* de-allocate shared area */
875 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
877 end_function_unmap_io_memory:
879 iounmap((void *) sep_dev->reg_base_address);
881 /* release io memory region */
882 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
886 dbg("SEP Driver:<-------- Init end\n");
894 /*-------------------------------------------------------------
896 --------------------------------------------------------------*/
/* module exit: tear down in reverse order of sep_init — fs registration,
   shared area, register mapping, io memory region */
897 static void __exit sep_exit(void)
902 /*-----------------------------
904 --------------------------------*/
906 dbg("SEP Driver:--------> Exit start\n");
908 /* unregister from fs */
909 sep_unregister_driver_from_fs();
911 /* calculate the total size for de-allocation */
912 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
913 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
916 /* free shared area */
917 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
919 edbg("SEP Driver: free pages SEP SHARED AREA \n");
921 iounmap((void *) sep_dev->reg_base_address);
923 edbg("SEP Driver: iounmap \n");
925 /* release io memory region */
926 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
928 edbg("SEP Driver: release_mem_region \n");
930 dbg("SEP Driver:<-------- Exit end\n");
935 interrupt handler function
937 irqreturn_t sep_inthandler(int irq, void *dev_id)
940 irqreturn_t int_error;
946 unsigned long reg_val;
949 unsigned long flow_id;
952 struct sep_flow_context_t *flow_context_ptr;
954 /*-----------------------------
956 -----------------------------*/
958 int_error = IRQ_HANDLED;
960 /* read the IRR register to check if this is SEP interrupt */
961 reg_val = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
962 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
964 /* check if this is the flow interrupt */
/* NOTE(review): flow branch is compiled out — the bit-11 test is commented
   inside an `if (0 ...)` */
965 if (0 /*reg_val & (0x1 << 11) */ ) {
966 /* read GPRO to find out the which flow is done */
/* NOTE(review): comment says GPR0 but the code reads HW_HOST_IRR_REG_ADDR —
   verify the intended register */
967 flow_id = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
969 /* find the context of the flow (flow id is in the top 4 bits) */
970 error = sep_find_flow_context(flow_id >> 28, &flow_context_ptr);
972 goto end_function_with_error;
/* hand the completed flow off to the workqueue for bottom-half processing */
974 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
977 queue_work(sep_dev->flow_wq_ptr, &flow_context_ptr->flow_wq);
980 /* check if this is reply interrupt from SEP */
981 if (reg_val & (0x1 << 13)) {
982 /* update the counter of reply messages */
983 sep_dev->sep_to_host_reply_counter++;
985 /* wake up the waiting process */
986 wake_up(&g_sep_event);
988 int_error = IRQ_NONE;
993 end_function_with_error:
995 /* clear the interrupt */
996 sep_write_reg(sep_dev, HW_HOST_ICR_REG_ADDR, reg_val);
1005 This function prepares only input DMA table for synhronic symmetric
/* Builds the chain of input LLI tables in the shared area for a synchronous
   symmetric operation: locks the source pages, then packs their physical
   addresses into fixed-size tables linked through each table's info entry.
   Outputs: physical address of the first table, its entry count and the
   data size it covers. */
1008 int sep_prepare_input_dma_table(unsigned long app_virt_addr, unsigned long data_size, unsigned long block_size, unsigned long *lli_table_ptr, unsigned long *num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1010 /* pointer to the info entry of the table - the last entry */
1011 struct sep_lli_entry_t *info_entry_ptr;
1013 /* array of pointers of page */
1014 struct sep_lli_entry_t *lli_array_ptr;
1016 /* points to the first entry to be processed in the lli_in_array */
1017 unsigned long current_entry;
1019 /* num entries in the virtual buffer */
1020 unsigned long sep_lli_entries;
1022 /* lli table pointer */
1023 struct sep_lli_entry_t *in_lli_table_ptr;
1025 /* the total data in one table */
1026 unsigned long table_data_size;
1028 /* number of entries in lli table */
1029 unsigned long num_entries_in_table;
1031 /* next table address */
1032 unsigned long lli_table_alloc_addr;
1035 unsigned long result;
1037 /*------------------------
1039 --------------------------*/
1041 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1043 edbg("SEP Driver:data_size is %lu\n", data_size);
1044 edbg("SEP Driver:block_size is %lu\n", block_size);
1046 /* initialize the pages pointers */
1047 sep_dev->in_page_array = 0;
1048 sep_dev->in_num_pages = 0;
1050 if (data_size == 0) {
1051 /* special case - created 2 entries table with zero data */
1052 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1053 in_lli_table_ptr->physical_address = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1054 in_lli_table_ptr->block_size = 0;
/* second (info) entry marks end-of-chain */
1057 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1058 in_lli_table_ptr->block_size = 0;
1060 *lli_table_ptr = sep_dev->phys_shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1061 *num_entries_ptr = 2;
1062 *table_data_size_ptr = 0;
1067 /* check if the pages are in Kernel Virtual Address layout */
1068 if (isKernelVirtualAddress == true)
1069 /* lock the pages of the kernel buffer and translate them to pages */
1070 result = sep_lock_kernel_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
1072 /* lock the pages of the user buffer and translate them to pages */
1073 result = sep_lock_user_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
1078 edbg("SEP Driver:output sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
1082 sep_lli_entries = sep_dev->in_num_pages;
1084 /* initiate to point after the message area */
1085 lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1087 /* loop till all the entries in in array are not processed */
1088 while (current_entry < sep_lli_entries) {
1089 /* set the new input and output tables */
1090 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1092 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1094 /* calculate the maximum size of data for input table */
1095 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1097 /* now calculate the table size so that it will be module block size */
/* round the table's data size down to a whole number of cipher blocks */
1098 table_data_size = (table_data_size / block_size) * block_size;
1100 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1102 /* construct input lli table */
/* NOTE(review): "¤t_entry" is a mojibake of "&current_entry" (an HTML
   "&curren;" encoding artifact) — restore the address-of expression */
1103 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, ¤t_entry, &num_entries_in_table, table_data_size);
1105 if (info_entry_ptr == 0) {
1106 /* set the output parameters to physical addresses */
1107 *lli_table_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
1108 *num_entries_ptr = num_entries_in_table;
1109 *table_data_size_ptr = table_data_size;
1111 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1113 /* update the info entry of the previous in table */
/* link the previous table's info entry to this new table */
1114 info_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
/* info entry encodes entry count in the top byte and data size below it */
1115 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1118 /* save the pointer to the info entry of the current tables */
1119 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1122 /* print input tables */
1123 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
1124 sep_shared_area_phys_to_virt(*lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1126 /* the array of the pages */
1127 kfree(lli_array_ptr);
1131 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1138 This function builds input and output DMA tables for synhronic
1139 symmetric operations (AES, DES). It also checks that each table
1140 is of the modular block size
/* Locks both the input and output buffers (kernel- or user-space), then
   delegates table construction to sep_construct_dma_tables_from_lli();
   the error labels free the lli arrays in reverse acquisition order. */
1142 int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
1143 unsigned long app_virt_out_addr,
1144 unsigned long data_size,
1145 unsigned long block_size,
1146 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1148 /* array of pointers of page */
1149 struct sep_lli_entry_t *lli_in_array;
1151 /* array of pointers of page */
1152 struct sep_lli_entry_t *lli_out_array;
1158 /*------------------------
1160 --------------------------*/
1162 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1166 /* initialize the pages pointers */
1167 sep_dev->in_page_array = 0;
1168 sep_dev->out_page_array = 0;
1170 /* check if the pages are in Kernel Virtual Address layout */
1171 if (isKernelVirtualAddress == true) {
1172 /* lock the pages of the kernel buffer and translate them to pages */
1173 result = sep_lock_kernel_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
1175 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1179 /* lock the pages of the user buffer and translate them to pages */
1180 result = sep_lock_user_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
1182 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* repeat the lock step for the output buffer */
1187 if (isKernelVirtualAddress == true) {
1188 result = sep_lock_kernel_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
1190 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1191 goto end_function_with_error1;
1194 result = sep_lock_user_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
1196 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1197 goto end_function_with_error1;
1202 edbg("sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
1203 edbg("sep_dev->out_num_pages is %lu\n", sep_dev->out_num_pages);
1204 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1207 /* call the function that creates tables from the lli arrays */
1208 result = sep_construct_dma_tables_from_lli(lli_in_array, sep_dev->in_num_pages, lli_out_array, sep_dev->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1210 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1211 goto end_function_with_error2;
1214 /* fall through - free the lli entry arrays */
1216 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1217 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1218 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1221 end_function_with_error2:
1223 kfree(lli_out_array);
1225 end_function_with_error1:
1227 kfree(lli_in_array);
1231 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1239 This function creates the input and output dma tables for
1240 symmetric operations (AES/DES) according to the block size from LLI arays
1242 int sep_construct_dma_tables_from_lli(struct sep_lli_entry_t *lli_in_array,
1243 unsigned long sep_in_lli_entries,
1244 struct sep_lli_entry_t *lli_out_array,
1245 unsigned long sep_out_lli_entries,
1246 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1248 /* points to the area where next lli table can be allocated */
1249 unsigned long lli_table_alloc_addr;
1251 /* input lli table */
1252 struct sep_lli_entry_t *in_lli_table_ptr;
1254 /* output lli table */
1255 struct sep_lli_entry_t *out_lli_table_ptr;
1257 /* pointer to the info entry of the table - the last entry */
1258 struct sep_lli_entry_t *info_in_entry_ptr;
1260 /* pointer to the info entry of the table - the last entry */
1261 struct sep_lli_entry_t *info_out_entry_ptr;
1263 /* points to the first entry to be processed in the lli_in_array */
1264 unsigned long current_in_entry;
1266 /* points to the first entry to be processed in the lli_out_array */
1267 unsigned long current_out_entry;
1269 /* max size of the input table */
1270 unsigned long in_table_data_size;
1272 /* max size of the output table */
1273 unsigned long out_table_data_size;
1275 /* flag te signifies if this is the first tables build from the arrays */
1276 unsigned long first_table_flag;
1278 /* the data size that should be in table */
1279 unsigned long table_data_size;
1281 /* number of etnries in the input table */
1282 unsigned long num_entries_in_table;
1284 /* number of etnries in the output table */
1285 unsigned long num_entries_out_table;
1287 /*---------------------
1289 ------------------------*/
1291 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1293 /* initiate to pint after the message area */
1294 lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1296 current_in_entry = 0;
1297 current_out_entry = 0;
1298 first_table_flag = 1;
1299 info_in_entry_ptr = 0;
1300 info_out_entry_ptr = 0;
1302 /* loop till all the entries in in array are not processed */
1303 while (current_in_entry < sep_in_lli_entries) {
1304 /* set the new input and output tables */
1305 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1307 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1309 /* set the first output tables */
1310 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1312 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1314 /* calculate the maximum size of data for input table */
1315 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1317 /* calculate the maximum size of data for output table */
1318 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1320 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1321 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1323 /* check where the data is smallest */
1324 table_data_size = in_table_data_size;
1325 if (table_data_size > out_table_data_size)
1326 table_data_size = out_table_data_size;
1328 /* now calculate the table size so that it will be module block size */
1329 table_data_size = (table_data_size / block_size) * block_size;
1331 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1333 /* construct input lli table */
1334 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, ¤t_in_entry, &num_entries_in_table, table_data_size);
1336 /* construct output lli table */
1337 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, ¤t_out_entry, &num_entries_out_table, table_data_size);
1339 /* if info entry is null - this is the first table built */
1340 if (info_in_entry_ptr == 0) {
1341 /* set the output parameters to physical addresses */
1342 *lli_table_in_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
1343 *in_num_entries_ptr = num_entries_in_table;
1344 *lli_table_out_ptr = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
1345 *out_num_entries_ptr = num_entries_out_table;
1346 *table_data_size_ptr = table_data_size;
1348 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1349 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1351 /* update the info entry of the previous in table */
1352 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
1353 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1355 /* update the info entry of the previous in table */
1356 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
1357 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1360 /* save the pointer to the info entry of the current tables */
1361 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1362 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1364 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1365 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1366 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1369 /* print input tables */
1370 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
1371 sep_shared_area_phys_to_virt(*lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1373 /* print output tables */
1374 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
1375 sep_shared_area_phys_to_virt(*lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1377 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1383 this function calculates the size of data that can be inserted into the lli
1384 table from this array the condition is that either the table is full
1385 (all etnries are entered), or there are no more entries in the lli array
1387 unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
1389 /* table data size */
1390 unsigned long table_data_size;
1393 unsigned long counter;
1395 /*---------------------
1397 ----------------------*/
1399 table_data_size = 0;
1401 /* calculate the data in the out lli table if till we fill the whole
1402 table or till the data has ended */
1403 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
1404 table_data_size += lli_in_array_ptr[counter].block_size;
1406 return table_data_size;
1410 this functions builds ont lli table from the lli_array according to
1411 the given size of data
1413 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1415 /* current table data size */
1416 unsigned long curr_table_data_size;
1418 /* counter of lli array entry */
1419 unsigned long array_counter;
1421 /*-----------------------
1423 ---------------------------*/
1425 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1427 /* init currrent table data size and lli array entry counter */
1428 curr_table_data_size = 0;
1430 *num_table_entries_ptr = 1;
1432 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1434 /* fill the table till table size reaches the needed amount */
1435 while (curr_table_data_size < table_data_size) {
1436 /* update the number of entries in table */
1437 (*num_table_entries_ptr)++;
1439 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1440 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1441 curr_table_data_size += lli_table_ptr->block_size;
1443 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1444 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1445 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1447 /* check for overflow of the table data */
1448 if (curr_table_data_size > table_data_size) {
1449 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1451 /* update the size of block in the table */
1452 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1454 /* update the physical address in the lli array */
1455 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1457 /* update the block size left in the lli array */
1458 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1460 /* advance to the next entry in the lli_array */
1463 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1464 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1466 /* move to the next entry in table */
1470 /* set the info entry to default */
1471 lli_table_ptr->physical_address = 0xffffffff;
1472 lli_table_ptr->block_size = 0;
1474 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1475 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1476 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1479 /* set the output parameter */
1480 *num_processed_entries_ptr += array_counter;
1482 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1485 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1491 this function goes over the list of the print created tables and
1494 static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1496 unsigned long table_count;
1498 unsigned long entries_count;
1499 /*-----------------------------
1501 -------------------------------*/
1503 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1506 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1507 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1508 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1510 /* print entries of the table (without info entry) */
1511 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1512 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1513 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1516 /* point to the info entry */
1519 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1520 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1523 table_data_size = lli_table_ptr->block_size & 0xffffff;
1524 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1525 lli_table_ptr = (struct sep_lli_entry_t *)
1526 (lli_table_ptr->physical_address);
1528 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1530 if ((unsigned long) lli_table_ptr != 0xffffffff)
1531 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt((unsigned long) lli_table_ptr);
1536 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1541 This function locks all the physical pages of the application virtual buffer
1542 and construct a basic lli array, where each entry holds the physical page
1543 address and the size that application data holds in this physical pages
1545 int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
1550 /* the the page of the end address of the user space buffer */
1551 unsigned long end_page;
1553 /* the page of the start address of the user space buffer */
1554 unsigned long start_page;
1556 /* the range in pages */
1557 unsigned long num_pages;
1559 /* array of pointers ot page */
1560 struct page **page_array;
1563 struct sep_lli_entry_t *lli_array;
1566 unsigned long count;
1571 /*------------------------
1573 --------------------------*/
1575 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
1579 /* set start and end pages and num pages */
1580 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1581 start_page = app_virt_addr >> PAGE_SHIFT;
1582 num_pages = end_page - start_page + 1;
1584 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
1585 edbg("SEP Driver: data_size is %lu\n", data_size);
1586 edbg("SEP Driver: start_page is %lu\n", start_page);
1587 edbg("SEP Driver: end_page is %lu\n", end_page);
1588 edbg("SEP Driver: num_pages is %lu\n", num_pages);
1590 /* allocate array of pages structure pointers */
1591 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1593 edbg("SEP Driver: kmalloc for page_array failed\n");
1599 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
1601 edbg("SEP Driver: kmalloc for lli_array failed\n");
1604 goto end_function_with_error1;
1607 /* convert the application virtual address into a set of physical */
1608 down_read(¤t->mm->mmap_sem);
1609 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
1610 up_read(¤t->mm->mmap_sem);
1612 /* check the number of pages locked - if not all then exit with error */
1613 if (result != num_pages) {
1614 dbg("SEP Driver: not all pages locked by get_user_pages\n");
1617 goto end_function_with_error2;
1620 /* flush the cache */
1621 for (count = 0; count < num_pages; count++)
1622 flush_dcache_page(page_array[count]);
1624 /* set the start address of the first page - app data may start not at
1625 the beginning of the page */
1626 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
1628 /* check that not all the data is in the first page only */
1629 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1630 lli_array[0].block_size = data_size;
1632 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1635 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
1637 /* go from the second page to the prev before last */
1638 for (count = 1; count < (num_pages - 1); count++) {
1639 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1640 lli_array[count].block_size = PAGE_SIZE;
1642 edbg("lli_array[%lu].physical_address is %08lx, \
1643 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1646 /* if more then 1 pages locked - then update for the last page size needed */
1647 if (num_pages > 1) {
1648 /* update the address of the last page */
1649 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1651 /* set the size of the last page */
1652 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
1654 if (lli_array[count].block_size == 0) {
1655 dbg("app_virt_addr is %08lx\n", app_virt_addr);
1656 dbg("data_size is %lu\n", data_size);
1659 edbg("lli_array[%lu].physical_address is %08lx, \
1660 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1663 /* set output params */
1664 *lli_array_ptr = lli_array;
1665 *num_pages_ptr = num_pages;
1666 *page_array_ptr = page_array;
1670 end_function_with_error2:
1672 /* release the cache */
1673 for (count = 0; count < num_pages; count++)
1674 page_cache_release(page_array[count]);
1676 /* free lli array */
1679 end_function_with_error1:
1681 /* free page array */
1686 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
1692 This function locks all the physical pages of the kernel virtual buffer
1693 and construct a basic lli array, where each entry holds the physical
1694 page address and the size that application data holds in this physical pages
1696 int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
1701 /* the the page of the end address of the user space buffer */
1702 unsigned long end_page;
1704 /* the page of the start address of the user space buffer */
1705 unsigned long start_page;
1707 /* the range in pages */
1708 unsigned long num_pages;
1711 struct sep_lli_entry_t *lli_array;
1713 /* next kernel address to map */
1714 unsigned long next_kernel_address;
1717 unsigned long count;
1720 /*------------------------
1722 --------------------------*/
1724 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
1728 /* set start and end pages and num pages */
1729 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
1730 start_page = kernel_virt_addr >> PAGE_SHIFT;
1731 num_pages = end_page - start_page + 1;
1733 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
1734 edbg("SEP Driver: data_size is %lu\n", data_size);
1735 edbg("SEP Driver: start_page is %lx\n", start_page);
1736 edbg("SEP Driver: end_page is %lx\n", end_page);
1737 edbg("SEP Driver: num_pages is %lu\n", num_pages);
1739 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
1741 edbg("SEP Driver: kmalloc for lli_array failed\n");
1747 /* set the start address of the first page - app data may start not at
1748 the beginning of the page */
1749 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
1751 /* check that not all the data is in the first page only */
1752 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
1753 lli_array[0].block_size = data_size;
1755 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
1758 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
1760 /* advance the address to the start of the next page */
1761 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
1763 /* go from the second page to the prev before last */
1764 for (count = 1; count < (num_pages - 1); count++) {
1765 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
1766 lli_array[count].block_size = PAGE_SIZE;
1768 edbg("lli_array[%lu].physical_address is %08lx, \
1769 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1771 next_kernel_address += PAGE_SIZE;
1774 /* if more then 1 pages locked - then update for the last page size needed */
1775 if (num_pages > 1) {
1776 /* update the address of the last page */
1777 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
1779 /* set the size of the last page */
1780 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
1782 if (lli_array[count].block_size == 0) {
1783 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
1784 dbg("data_size is %lu\n", data_size);
1788 edbg("lli_array[%lu].physical_address is %08lx, \
1789 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1792 /* set output params */
1793 *lli_array_ptr = lli_array;
1794 *num_pages_ptr = num_pages;
1795 *page_array_ptr = 0;
1800 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
/*
  This function releases all the application virtual buffer physical pages
  that were previously locked by sep_lock_user_pages(), then frees the
  page array itself.
  @dirtyFlag: non-zero when the device wrote into the buffer (output
  pages); such pages are marked dirty before release so the new data is
  written back. Input pages were only read, so no update is needed.
  Always returns 0.
*/
int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	if (dirtyFlag) {
		for (count = 0; count < num_pages; count++) {
			/* the out array was written, therefore the data was changed */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
			page_cache_release(page_array_ptr[count]);
		}
	} else {
		/* free in pages - the data was only read, therefore no update
		   was done on those pages */
		for (count = 0; count < num_pages; count++)
			page_cache_release(page_array_ptr[count]);
	}

	/* free the array */
	kfree(page_array_ptr);

	return 0;
}
1840 This function raises interrupt to SEP that signals that is has a new
1843 static void sep_send_command_handler()
1846 unsigned long count;
1848 dbg("SEP Driver:--------> sep_send_command_handler start\n");
1855 for (count = 0; count < 12 * 4; count += 4)
1856 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1858 /* update counter */
1859 sep_dev->host_to_sep_send_counter++;
1861 /* send interrupt to SEP */
1862 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
1864 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
1870 This function raises interrupt to SEPm that signals that is has a
1871 new command from HOST
1873 static void sep_send_reply_command_handler()
1875 unsigned long count;
1877 dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
1882 for (count = 0; count < 12 * 4; count += 4)
1883 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1886 /* update counter */
1887 sep_dev->host_to_sep_send_counter++;
1889 /* send the interrupt to SEP */
1890 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep_dev->host_to_sep_send_counter);
1892 /* update both counters */
1893 sep_dev->host_to_sep_send_counter++;
1895 sep_dev->sep_to_host_reply_counter++;
1897 dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
1905 This function handles the allocate data pool memory request
1906 This function returns calculates the physical address of the
1907 allocated memory, and the offset of this area from the mapped address.
1908 Therefore, the FVOs in user space can calculate the exact virtual
1909 address of this allocated memory
1911 static int sep_allocate_data_pool_memory_handler(unsigned long arg)
1916 /* command paramaters */
1917 struct sep_driver_alloc_t command_args;
1919 /*-------------------------
1921 ----------------------------*/
1923 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
1926 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
1930 /* allocate memory */
1931 if ((sep_dev->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
1936 /* set the virtual and physical address */
1937 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
1938 command_args.phys_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
1940 /* write the memory back to the user space */
1941 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
1945 /* set the allocation */
1946 sep_dev->data_pool_bytes_allocated += command_args.num_bytes;
1950 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
1956 This function handles write into allocated data pool command
1958 static int sep_write_into_data_pool_handler(unsigned long arg)
1963 /* virtual address */
1964 unsigned long virt_address;
1966 /* application in address */
1967 unsigned long app_in_address;
1969 /* number of bytes */
1970 unsigned long num_bytes;
1972 /* address of the data pool */
1973 unsigned long data_pool_area_addr;
1975 /*--------------------------
1977 -----------------------------*/
1979 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
1981 /* get the application address */
1982 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
1986 /* get the virtual kernel address address */
1987 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
1991 /* get the number of bytes */
1992 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
1996 /* calculate the start of the data pool */
1997 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
2000 /* check that the range of the virtual kernel address is correct */
2001 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
2006 /* copy the application data */
2007 error = copy_from_user((void *) virt_address, (void *) app_in_address, num_bytes);
2011 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
2017 this function handles the read from data pool command
2019 static int sep_read_from_data_pool_handler(unsigned long arg)
2024 /* virtual address of dest application buffer */
2025 unsigned long app_out_address;
2027 /* virtual address of the data pool */
2028 unsigned long virt_address;
2031 unsigned long num_bytes;
2033 /* address of the data pool */
2034 unsigned long data_pool_area_addr;
2036 /*------------------------
2038 -----------------------------*/
2040 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
2042 /* get the application address */
2043 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
2047 /* get the virtual kernel address address */
2048 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
2052 /* get the number of bytes */
2053 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
2057 /* calculate the start of the data pool */
2058 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
2060 /* check that the range of the virtual kernel address is correct */
2061 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
2066 /* copy the application data */
2067 error = copy_to_user((void *) app_out_address, (void *) virt_address, num_bytes);
2071 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
2078 this function handles tha request for creation of the DMA table
2079 for the synchronic symmetric operations (AES,DES)
2081 static int sep_create_sync_dma_tables_handler(unsigned long arg)
2086 /* command arguments */
2087 struct sep_driver_build_sync_table_t command_args;
2089 /*------------------------
2091 --------------------------*/
2093 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
2095 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
2099 edbg("app_in_address is %08lx\n", command_args.app_in_address);
2100 edbg("app_out_address is %08lx\n", command_args.app_out_address);
2101 edbg("data_size is %lu\n", command_args.data_in_size);
2102 edbg("block_size is %lu\n", command_args.block_size);
2105 /* check if we need to build only input table or input/output */
2106 if (command_args.app_out_address)
2107 /* prepare input and output tables */
2108 error = sep_prepare_input_output_dma_table(command_args.app_in_address,
2109 command_args.app_out_address,
2110 command_args.data_in_size,
2111 command_args.block_size,
2112 &command_args.in_table_address,
2113 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
2115 /* prepare input tables */
2116 error = sep_prepare_input_dma_table(command_args.app_in_address,
2117 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
2123 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
2127 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
2133 this function handles the request for freeing dma table for synhronic actions
2135 int sep_free_dma_table_data_handler()
2137 /*-------------------------
2139 -----------------------------*/
2141 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
2143 /* free input pages array */
2144 sep_free_dma_pages(sep_dev->in_page_array, sep_dev->in_num_pages, 0);
2146 /* free output pages array if needed */
2147 if (sep_dev->out_page_array)
2148 sep_free_dma_pages(sep_dev->out_page_array, sep_dev->out_num_pages, 1);
2150 /* reset all the values */
2151 sep_dev->in_page_array = 0;
2152 sep_dev->out_page_array = 0;
2153 sep_dev->in_num_pages = 0;
2154 sep_dev->out_num_pages = 0;
2157 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
2163 this function handles the request to create the DMA tables for flow
2165 static int sep_create_flow_dma_tables_handler(unsigned long arg)
2170 /* command arguments */
2171 struct sep_driver_build_flow_table_t command_args;
2173 /* first table - output */
2174 struct sep_lli_entry_t first_table_data;
2176 /* dma table data */
2177 struct sep_lli_entry_t last_table_data;
2179 /* pointer to the info entry of the previuos DMA table */
2180 struct sep_lli_entry_t *prev_info_entry_ptr;
2182 /* pointer to the flow data strucutre */
2183 struct sep_flow_context_t *flow_context_ptr;
2185 /*------------------------
2187 --------------------------*/
2189 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
2191 /* init variables */
2192 prev_info_entry_ptr = 0;
2193 first_table_data.physical_address = 0xffffffff;
2195 /* find the free structure for flow data */
2196 error = sep_find_flow_context(SEP_FREE_FLOW_ID, &flow_context_ptr);
2200 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
2204 /* create flow tables */
2205 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
2207 goto end_function_with_error;
2209 /* check if flow is static */
2210 if (!command_args.flow_type)
2211 /* point the info entry of the last to the info entry of the first */
2212 last_table_data = first_table_data;
2214 /* set output params */
2215 command_args.first_table_addr = first_table_data.physical_address;
2216 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
2217 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
2219 /* send the parameters to user application */
2220 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
2222 goto end_function_with_error;
2224 /* all the flow created - update the flow entry with temp id */
2225 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
2227 /* set the processing tables data in the context */
2228 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
2229 flow_context_ptr->input_tables_in_process = first_table_data;
2231 flow_context_ptr->output_tables_in_process = first_table_data;
2235 end_function_with_error:
2237 /* free the allocated tables */
2238 sep_deallocated_flow_tables(&first_table_data);
2242 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
2249 this function handles adding tables to a flow
/*
 * sep_add_flow_tables_handler - ioctl helper that builds new DMA tables for
 * an existing flow and chains them onto the flow's input or output table
 * list, then copies the resulting first-table descriptor back to user space.
 * Returns 0 on success or a negative/ nonzero error; on error the freshly
 * allocated tables are released via end_function_with_error.
 */
2251 static int sep_add_flow_tables_handler(unsigned long arg)
2256 /* number of entries */
2257 unsigned long num_entries;
2259 /* command arguments */
2260 struct sep_driver_add_flow_table_t command_args;
2262 /* pointer to the flow data structure */
2263 struct sep_flow_context_t *flow_context_ptr;
2265 /* first dma table data */
2266 struct sep_lli_entry_t first_table_data;
2268 /* last dma table data */
2269 struct sep_lli_entry_t last_table_data;
2271 /* pointer to the info entry of the current DMA table */
2272 struct sep_lli_entry_t *info_entry_ptr;
2274 /*--------------------------
2276 ----------------------------*/
2278 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
2280 /* get input parameters */
2281 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
2285 /* find the flow structure for the flow id */
2286 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
2290 /* prepare the flow dma tables */
2291 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
2293 goto end_function_with_error;
2295 /* now check if there is already an existing add table for this flow */
2296 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
2297 /* this buffer was for input buffers */
2298 if (flow_context_ptr->input_tables_flag) {
2299 /* add table already exists - add the new tables to the end
2301 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* NOTE(review): physical_address is used directly as a dereferenceable
   pointer below - presumably the shared area is identity-mapped; confirm */
2303 info_entry_ptr = (struct sep_lli_entry_t *)
2304 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
2306 /* connect to list of tables */
2307 *info_entry_ptr = first_table_data;
2309 /* set the first table data */
2310 first_table_data = flow_context_ptr->first_input_table;
2312 /* set the input flag */
2313 flow_context_ptr->input_tables_flag = 1;
2315 /* set the first table data */
2316 flow_context_ptr->first_input_table = first_table_data;
2318 /* set the last table data */
2319 flow_context_ptr->last_input_table = last_table_data;
2320 } else { /* this is output tables */
2322 /* this buffer was for output buffers */
2323 if (flow_context_ptr->output_tables_flag) {
2324 /* add table already exists - add the new tables to
2325 the end of the previous */
2326 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
2328 info_entry_ptr = (struct sep_lli_entry_t *)
2329 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
2331 /* connect to list of tables */
2332 *info_entry_ptr = first_table_data;
2334 /* set the first table data */
2335 first_table_data = flow_context_ptr->first_output_table;
2337 /* set the output flag */
2338 flow_context_ptr->output_tables_flag = 1;
2340 /* set the first table data */
2341 flow_context_ptr->first_output_table = first_table_data;
2343 /* set the last table data */
2344 flow_context_ptr->last_output_table = last_table_data;
2347 /* set output params */
2348 command_args.first_table_addr = first_table_data.physical_address;
2349 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
2350 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
2352 /* send the parameters to user application */
2353 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
2355 goto end_function_with_error;
2357 end_function_with_error:
2359 /* free the allocated tables */
2360 sep_deallocated_flow_tables(&first_table_data);
2364 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
2370 this function adds the flow add message to the specific flow
/*
 * sep_add_flow_tables_message_handler - copies a user-supplied "add tables"
 * message into the flow context so it can later be forwarded to SEP.
 * Rejects messages larger than SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES.
 */
2372 static int sep_add_flow_tables_message_handler(unsigned long arg)
2378 struct sep_driver_add_message_t command_args;
2381 struct sep_flow_context_t *flow_context_ptr;
2383 /*----------------------------
2385 ------------------------------*/
2387 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
2389 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* bounds-check the message length before copying into the fixed-size
   context buffer */
2394 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
2399 /* find the flow context */
2400 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
2404 /* copy the message into context */
2405 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
2407 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
2412 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
2419 this function returns the physical and virtual addresses of the static pool
/*
 * sep_get_static_pool_addr_handler - reports the physical and (driver)
 * virtual addresses of the static pool inside the shared area to user space.
 */
2421 static int sep_get_static_pool_addr_handler(unsigned long arg)
2426 /* command arguments */
2427 struct sep_driver_static_pool_addr_t command_args;
2429 /*-----------------------------
2431 ------------------------------*/
2433 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2435 /*prepare the output parameters in the struct */
2436 command_args.physical_static_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2437 command_args.virtual_static_address = sep_dev->shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2439 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2441 /* send the parameters to user application */
2442 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2448 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2454 this function gets the offset of the physical address from the start
/*
 * sep_get_physical_mapped_offset_handler - converts a physical address
 * supplied by user space into its offset from the start of the shared area.
 * NOTE(review): only the lower bound is validated in the visible code; an
 * upper-bound check against the shared-area size is not visible here - confirm.
 */
2457 static int sep_get_physical_mapped_offset_handler(unsigned long arg)
2462 /* command arguments */
2463 struct sep_driver_get_mapped_offset_t command_args;
2465 /*-----------------------------
2467 ------------------------------*/
2469 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2471 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2475 if (command_args.physical_address < sep_dev->phys_shared_area_addr) {
2480 /*prepare the output parameters in the struct */
2481 command_args.offset = command_args.physical_address - sep_dev->phys_shared_area_addr;
2483 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2485 /* send the parameters to user application */
2486 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2492 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * sep_start_handler - polls the SEP->host GPR3 register for the start-up
 * message from SEP; a GPR3 value of 0x1 indicates a fatal error, in which
 * case the error status is read from GPR0.
 */
2501 static int sep_start_handler(void)
2504 unsigned long reg_val;
2507 unsigned long error;
2509 /*-----------------------------
2511 ------------------------------*/
2513 dbg("SEP Driver:--------> sep_start_handler start\n");
2517 /* wait in polling for message from SEP */
2519 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2522 /* check the value */
2523 if (reg_val == 0x1) {
2524 /* fatal error - read error status from GPR0 */
2525 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2531 dbg("SEP Driver:<-------- sep_start_handler end\n");
2537 this function handles the request for SEP initialization
/*
 * sep_init_handler - downloads the user-supplied initialization message into
 * the SEP SRAM word by word, signals SEP via GPR0, then polls GPR3 for the
 * init-complete / error indication (0x1 in GPR3 means init failed).
 */
2539 static int sep_init_handler(unsigned long arg)
2541 /* word from message */
2542 unsigned long message_word;
2545 unsigned long *message_ptr;
2547 /* command arguments */
2548 struct sep_driver_init_t command_args;
2551 unsigned long counter;
2554 unsigned long error;
2557 unsigned long reg_val;
2559 /*-------------------
2561 ---------------------*/
2563 dbg("SEP Driver:--------> sep_init_handler start\n");
2567 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2569 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2574 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2575 /*sep_configure_dma_burst(); */
2577 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2579 message_ptr = (unsigned long *) command_args.message_addr;
2581 /* set the base address of the SRAM */
2582 sep_write_reg(sep_dev, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2584 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
/* NOTE(review): get_user() return value is ignored here; on a faulting
   user pointer a stale message_word would be written to SRAM - confirm */
2585 get_user(message_word, message_ptr);
2587 /* write data to SRAM */
2588 sep_write_reg(sep_dev, HW_SRAM_DATA_REG_ADDR, message_word);
2590 edbg("SEP Driver:message_word is %lu\n", message_word);
2592 /* wait for write complete */
2593 sep_wait_sram_write(sep_dev);
2596 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* signal SEP that the init message is in place */
2599 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2602 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2603 } while (!(reg_val & 0xFFFFFFFD));
2605 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2607 /* check the value */
2608 if (reg_val == 0x1) {
2609 edbg("SEP Driver:init failed\n");
2611 error = sep_read_reg(sep_dev, 0x8060);
2612 edbg("SEP Driver:sw monitor is %lu\n", error);
2614 /* fatal error - read error status from GPR0 */
2615 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2616 edbg("SEP Driver:error is %lu\n", error);
2622 dbg("SEP Driver:<-------- sep_init_handler end\n");
2629 this function handles the request for cache and resident reallocation
/*
 * sep_realloc_cache_resident_handler - copies the cache and resident images
 * to their reserved locations, locks the area, and reports the new physical
 * addresses (and the lowest of cache/resident/shared-area as the new base)
 * back to user space.
 */
2631 static int sep_realloc_cache_resident_handler(unsigned long arg)
2636 /* physical cache addr */
2637 unsigned long phys_cache_address;
2639 /* physical resident addr */
2640 unsigned long phys_resident_address;
2642 /* command arguments */
2643 struct sep_driver_realloc_cache_resident_t command_args;
2645 /*------------------
2647 ---------------------*/
2650 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2654 /* copy cache and resident to their intended locations */
2655 error = sep_copy_cache_resident_to_area(command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
2659 /* lock the area (if needed) */
2660 sep_lock_cache_resident_area();
2662 command_args.new_base_addr = sep_dev->phys_shared_area_addr;
2664 /* find the new base address according to the lowest address between
2665 cache, resident and shared area */
2666 if (phys_resident_address < command_args.new_base_addr)
2667 command_args.new_base_addr = phys_resident_address;
2668 if (phys_cache_address < command_args.new_base_addr)
2669 command_args.new_base_addr = phys_cache_address;
2671 /* set the return parameters */
2672 command_args.new_cache_addr = phys_cache_address;
2673 command_args.new_resident_addr = phys_resident_address;
2676 /* set the new shared area */
2677 command_args.new_shared_area_addr = sep_dev->phys_shared_area_addr;
2679 edbg("SEP Driver:command_args.new_shared_area_addr is %08lx\n", command_args.new_shared_area_addr);
2680 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2681 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2682 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2684 /* return to user */
2685 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2693 this function handles the request for get time
/*
 * sep_get_time_handler - stamps the current time into the shared area via
 * sep_set_time() and returns the time value and its physical address to
 * user space.
 */
2695 static int sep_get_time_handler(unsigned long arg)
2700 /* command arguments */
2701 struct sep_driver_get_time_t command_args;
2703 /*------------------------
2705 --------------------------*/
/* NOTE(review): the error from sep_set_time() is overwritten by the
   copy_to_user() result below - harmless only if sep_set_time cannot fail */
2707 error = sep_set_time(&command_args.time_physical_address, &command_args.time_value);
2709 /* return to user */
2710 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_get_time_t));
2717 This api handles the setting of API mode to blocking or non-blocking
/*
 * sep_set_api_mode_handler - reads the requested API mode (blocking or
 * non-blocking) from user space and stores it in the device-wide flag.
 */
2719 static int sep_set_api_mode_handler(unsigned long arg)
2725 unsigned long mode_flag;
2727 /*----------------------------
2729 -----------------------------*/
2731 dbg("SEP Driver:--------> sep_set_api_mode_handler start\n");
2733 error = get_user(mode_flag, &(((struct sep_driver_set_api_mode_t *) arg)->mode));
2737 /* set the global flag */
2738 sep_dev->block_mode_flag = mode_flag;
2743 dbg("SEP Driver:<-------- sep_set_api_mode_handler end\n");
2749 This API handles the end transaction request
/*
 * sep_end_transaction_handler - releases the transaction lock held by the
 * current caller.  The interrupt-mode teardown (IMR mask + free_irq) is
 * compiled out via "#if 0" in this build.
 */
2751 static int sep_end_transaction_handler(unsigned long arg)
2753 /*----------------------------
2755 -----------------------------*/
2757 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2759 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2761 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2763 /* release IRQ line */
2764 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);
2766 /* lock the sep mutex */
2767 mutex_unlock(&sep_mutex);
2770 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2775 /* handler for flow done interrupt */
2776 static void sep_flow_done_handler(struct work_struct *work)
2778 /* flow context_ptr */
2779 struct sep_flow_context_t *flow_data_ptr;
2780 /*-------------------------
2782 ---------------------------*/
2784 /* obtain the mutex */
2785 mutex_lock(&sep_mutex);
2787 /* get the pointer to context */
2788 flow_data_ptr = (struct sep_flow_context_t *) work;
2790 /* free all the current input tables in sep */
2791 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2793 /* free all the current tables output tables in SEP (if needed) */
2794 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2795 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2797 /* check if we have additional tables to be sent to SEP only input
2798 flag may be checked */
2799 if (flow_data_ptr->input_tables_flag) {
2800 /* copy the message to the shared RAM and signal SEP */
2801 memcpy((void *) flow_data_ptr->message, (void *) sep_dev->shared_area_addr, flow_data_ptr->message_size_in_bytes);
2803 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2805 mutex_unlock(&sep_mutex);
2810 This function creates a list of tables for flow and returns the data for
2811 the first and last tables of the list
/*
 * sep_prepare_flow_dma_tables - builds a chain of one-flow DMA tables, one
 * per (address, size) pair read from the user-supplied buffer list, linking
 * each table's info entry to the next table.  Returns the first and last
 * table descriptors through the out-parameters.
 */
2813 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
2814 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
2819 /* virtual address of one buffer */
2820 unsigned long virt_buff_addr;
2822 /* virtual size of one buffer */
2823 unsigned long virt_buff_size;
2825 /* table data for each created table */
2826 struct sep_lli_entry_t table_data;
2829 struct sep_lli_entry_t *info_entry_ptr;
2831 /* previous info entry */
2832 struct sep_lli_entry_t *prev_info_entry_ptr;
2837 /*-------------------------------
2839 ----------------------------------*/
2843 prev_info_entry_ptr = 0;
2845 /* init the first table to default (0xffffffff marks "no table") */
2846 table_data.physical_address = 0xffffffff;
2847 first_table_data_ptr->physical_address = 0xffffffff;
2848 table_data.block_size = 0;
2850 for (i = 0; i < num_virtual_buffers; i++) {
2851 /* get the virtual buffer address */
2852 error = get_user(virt_buff_addr, &first_buff_addr);
2856 /* get the virtual buffer size */
2858 error = get_user(virt_buff_size, &first_buff_addr);
2862 /* advance the address to point to the next pair of address|size */
2865 /* now prepare the one flow LLI table from the data */
2866 error = sep_prepare_one_flow_dma_table(virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
2871 /* if this is the first table - save it to return to the user
2873 *first_table_data_ptr = table_data;
2875 /* set the pointer to info entry */
2876 prev_info_entry_ptr = info_entry_ptr;
2878 /* not first table - the previous table info entry should
/* chain: previous table's info entry points at this table, with the
   interrupt flag set in the block_size word */
2880 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
2882 /* set the pointer to info entry */
2883 prev_info_entry_ptr = info_entry_ptr;
2887 /* set the last table data */
2888 *last_table_data_ptr = table_data;
2897 This function creates one DMA table for flow and returns its data,
2898 and pointer to its info entry
/*
 * sep_prepare_one_flow_dma_table - locks one (kernel or user) virtual buffer
 * in memory, finds a free slot in the flow DMA table area, and fills it with
 * LLI entries for the buffer's pages.  The table layout is:
 *   [num pages][page-array pointer][entry 0..n-1][info entry].
 * Returns the table descriptor in *table_data and a pointer to the info
 * entry (used for chaining) in *info_entry_ptr.
 */
2900 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress)
2905 /* the range in pages */
2906 unsigned long lli_array_size;
2908 /* array of pointers to page */
2909 struct sep_lli_entry_t *lli_array;
2911 /* pointer to the entry in the dma table */
2912 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
2914 /* address of the dma table */
2915 unsigned long *start_dma_table_ptr;
2917 /* total table data counter */
2918 unsigned long dma_table_data_count;
2920 /* pointer that will keep the pointer to the pages of the virtual buffer */
2921 struct page **page_array_ptr;
2924 unsigned long entry_count;
2926 /*-------------------------------
2928 ----------------------------------*/
2930 /* find the space for the new table */
2931 error = sep_find_free_flow_dma_table_space(&start_dma_table_ptr);
2935 /* check if the pages are in Kernel Virtual Address layout */
2936 if (isKernelVirtualAddress == true)
2937 /* lock kernel buffer in the memory */
2938 error = sep_lock_kernel_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2940 /* lock user buffer in the memory */
2941 error = sep_lock_user_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2946 /* set the pointer to page array at the beginning of table - this table is
2947 now considered taken */
2948 *start_dma_table_ptr = lli_array_size;
2950 /* point to the place of the pages pointers of the table */
2951 start_dma_table_ptr++;
2953 /* set the pages pointer */
2954 *start_dma_table_ptr = (unsigned long) page_array_ptr;
2956 /* set the pointer to the first entry */
2957 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
2959 /* now create the entries for table */
2960 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
2961 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
2963 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
2965 /* set the total data of a table */
2966 dma_table_data_count += lli_array[entry_count].block_size;
2968 flow_dma_table_entry_ptr++;
2971 /* set the physical address */
2972 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
2974 /* set the num_entries and total data size */
2975 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
2977 /* set the info entry (0xffffffff = end of chain until linked) */
2978 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
2979 flow_dma_table_entry_ptr->block_size = 0;
2981 /* set the pointer to info entry */
2982 *info_entry_ptr = flow_dma_table_entry_ptr;
2984 /* the array of the lli entries */
2994 This function returns pointer to the flow data structure
2995 that contains the given id
/*
 * sep_find_flow_context - linear search of the device's flow array for the
 * entry whose flow_id matches; returns it through *flow_data_ptr, or an
 * error when no slot matches (count reaches SEP_DRIVER_NUM_FLOWS).
 */
2997 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr)
3000 unsigned long count;
3005 /*-----------------------
3007 ---------------------------*/
3012 always search for flow with id default first - in case we
3013 already started working on the flow there can be no situation
3014 when 2 flows are with default flag
3016 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
3017 if (sep_dev->flows_data_array[count].flow_id == flow_id) {
3018 *flow_data_ptr = &sep_dev->flows_data_array[count];
3023 if (count == SEP_DRIVER_NUM_FLOWS)
3031 this function finds a space for the new flow dma table
/*
 * sep_find_free_flow_dma_table_space - scans the flow DMA table area in
 * fixed-size strides for a slot whose first word (num-pages field, ignoring
 * the top bit) is zero, i.e. unused.  Returns the slot address through
 * *table_address_ptr, or an error when the area is exhausted.
 */
3033 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr)
3038 /* pointer to the id field of the flow dma table */
3039 unsigned long *start_table_ptr;
3041 /* start address of the flow dma area */
3042 unsigned long flow_dma_area_start_addr;
3044 /* end address of the flow dma area */
3045 unsigned long flow_dma_area_end_addr;
3047 /* maximum table size in words */
3048 unsigned long table_size_in_words;
3050 /*---------------------
3052 -----------------------*/
3056 /* find the start address of the flow DMA table area */
3057 flow_dma_area_start_addr = sep_dev->shared_area_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
3059 /* set end address of the flow table area */
3060 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
3062 /* set table size in words (+2 for the num-pages and page-array words) */
3063 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
3065 /* set the pointer to the start address of DMA area */
3066 start_table_ptr = (unsigned long *) flow_dma_area_start_addr;
3068 /* find the space for the next table */
3069 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && ((unsigned long) start_table_ptr < flow_dma_area_end_addr))
3070 start_table_ptr += table_size_in_words;
3072 /* check if we reached the end of the flow tables area */
3073 if ((unsigned long) start_table_ptr >= flow_dma_area_end_addr)
3076 *table_address_ptr = start_table_ptr;
3082 this function goes over all the flow tables connected to the given
3083 table and deallocates them
/*
 * sep_deallocated_flow_tables - walks the chain of flow DMA tables starting
 * at *first_table_ptr, releasing each table's locked pages, until it reaches
 * the end-of-chain marker (0xffffffff).
 * NOTE(review): physical_address is dereferenced directly as a CPU pointer -
 * presumably the tables live in an identity-mapped shared area; confirm.
 */
3085 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
3088 unsigned long *table_ptr;
3090 /* end address of the flow dma area */
3091 unsigned long num_entries;
3093 unsigned long num_pages;
3096 struct page **pages_ptr;
3098 /* maximum table size in words */
3099 struct sep_lli_entry_t *info_entry_ptr;
3101 /*-------------------------------
3103 ---------------------------------*/
3105 /* set the pointer to the first table */
3106 table_ptr = (unsigned long *) first_table_ptr->physical_address;
3108 /* set the num of entries */
3109 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
3110 & SEP_NUM_ENTRIES_MASK;
3112 /* go over all the connected tables */
3113 while (*table_ptr != 0xffffffff) {
3114 /* get number of pages (stored two words before the entries) */
3115 num_pages = *(table_ptr - 2);
3117 /* get the pointer to the pages */
3118 pages_ptr = (struct page **) (*(table_ptr - 1));
3120 /* free the pages */
3121 sep_free_dma_pages(pages_ptr, num_pages, 1);
3123 /* goto to the info entry */
3124 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
3126 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
3127 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
3136 This function handles the set flow id command
/*
 * sep_set_flow_id_handler - assigns the user-requested flow id to the flow
 * context that currently holds the temporary id (SEP_TEMP_FLOW_ID), i.e.
 * the one just created by sep_create_flow_dma_tables_handler.
 */
3136 static int sep_set_flow_id_handler(unsigned long arg)
3142 unsigned long flow_id;
3144 /* pointer to flow data structure */
3145 struct sep_flow_context_t *flow_data_ptr;
3147 /*----------------------
3149 -----------------------*/
3151 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
3153 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
3157 /* find the flow data structure that was just used for creating new flow
3158 - its id should be default */
3159 error = sep_find_flow_context(SEP_TEMP_FLOW_ID, &flow_data_ptr);
3164 flow_data_ptr->flow_id = flow_id;
3168 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
3176 calculates time and sets it at the predefined address
/*
 * sep_set_time - writes the SEP time token and the current wall-clock
 * seconds into the system-time slot of the message shared area, and
 * optionally returns the slot's physical address and the seconds value
 * through the out-parameters (either may be NULL).
 */
3178 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
3181 struct timeval time;
3183 /* address of time in the kernel */
3184 unsigned long time_addr;
3187 /*------------------------
3189 --------------------------*/
3191 dbg("SEP Driver:--------> sep_set_time start\n");
3194 do_gettimeofday(&time);
3196 /* set value in the SYSTEM MEMORY offset */
3197 time_addr = sep_dev->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
3199 *(unsigned long *) time_addr = SEP_TIME_VAL_TOKEN;
3200 *(unsigned long *) (time_addr + 4) = time.tv_sec;
3202 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
3203 edbg("SEP Driver:time_addr is %lu\n", time_addr);
3204 edbg("SEP Driver:g_message_shared_area_addr is %lu\n", sep_dev->message_shared_area_addr);
3206 /* set the output parameters if needed */
3208 *address_ptr = sep_shared_area_virt_to_phys(time_addr);
3210 if (time_in_sec_ptr)
3211 *time_in_sec_ptr = time.tv_sec;
3213 dbg("SEP Driver:<-------- sep_set_time end\n");
/*
 * sep_wait_busy - spins until the SEP busy register reports not-busy.
 * (Polling loop body is around the register read below.)
 */
3218 static void sep_wait_busy(struct sep_device *dev)
3223 reg = sep_read_reg(sep_dev, HW_HOST_SEP_BUSY_REG_ADDR);
3228 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * sep_configure_dma_burst - workaround ("PATCH") that switches the SEP DMA
 * from multi-burst to single-burst: requests register access from SEP via
 * GPR0, waits for not-busy, writes the AHB burst register, then releases.
 */
3230 static void sep_configure_dma_burst(void)
3233 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
3235 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
3237 /* request access to registers from SEP */
3238 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
3240 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
3242 sep_wait_busy(sep_dev);
3244 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
3246 /* set the DMA burst register to single burst */
3247 sep_write_reg(sep_dev, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
3249 /* release the sep busy */
3250 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
3251 sep_wait_busy(sep_dev);
3253 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
/* module entry/exit registration and license declaration */
3257 module_init(sep_init);
3258 module_exit(sep_exit);
3260 MODULE_LICENSE("GPL");