3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
54 #if SEP_DRIVER_ARM_DEBUG_MODE
56 #define CRYS_SEP_ROM_length 0x4000
57 #define CRYS_SEP_ROM_start_address 0x8000C000UL
58 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
59 #define SEP_ROM_BANK_register 0x80008420UL
60 #define SEP_ROM_BANK_register_offset 0x8420UL
61 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64 * THESE 2 definitions are specific to the board - must be
65 * defined during integration
67 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * sep_load_rom_code - push the SEP ROM image into the device and boot it
 * @sep: security processor device instance
 *
 * Writes the CRYS_SEP_ROM image (compiled in from SEP_ROM_image.h) into
 * the SEP ROM window bank by bank, releases the SEP from software reset
 * and then polls GPR3 for the boot-status codes.  Only built when
 * SEP_DRIVER_ARM_DEBUG_MODE is set.
 */
static void sep_load_rom_code(struct sep_device *sep)
/* i = ROM bank index, j = word index within a bank, k = image size */
unsigned long i, k, j;
/* Loading ROM from SEP_ROM_image.h file */
k = sizeof(CRYS_SEP_ROM);
edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
edbg("SEP Driver: k is %lu\n", k);
edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
/* the image is written through four banks of CRYS_SEP_ROM_length/4
   words each; the bank select register is programmed before each bank */
for (i = 0; i < 4; i++) {
sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
/* each bank holds 0x1000 32-bit words of the image */
sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
j = CRYS_SEP_ROM_length;
/* release the SEP from reset so the freshly written ROM starts */
sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
/* poll for SEP ROM boot finish */
reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
edbg("SEP Driver: ROM polling ended\n");
/* fatal error - read erro status from GPRO */
error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
edbg("SEP Driver: ROM polling case 1\n");
/* Cold boot ended successfully */
/* Warmboot ended successfully */
/* ColdWarm boot ended successfully */
/* Boot First Phase ended */
warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
/* NOTE(review): reg is printed with %d but appears to be an unsigned
   long register value - confirm the type of reg */
edbg("SEP Driver: ROM polling case %d\n", reg);
/* No-op stub when SEP_DRIVER_ARM_DEBUG_MODE is disabled: the SEP boots
   from its own ROM and the host has nothing to load */
static void sep_load_rom_code(struct sep_device *sep) { }
139 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
143 /*----------------------------------------
145 -----------------------------------------*/
147 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
148 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
150 /*--------------------------------------------
152 --------------------------------------------*/
154 /* debug messages level */
156 module_param(debug, int , 0);
157 MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
159 /* Keep this a single static object for now to keep the conversion easy */
161 static struct sep_device sep_instance;
162 static struct sep_device *sep_dev = &sep_instance;
/* mutex for the access to the internals of the sep driver */
167 static DEFINE_MUTEX(sep_mutex);
170 /* wait queue head (event) of the driver */
171 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
This function copies the cache and resident images from their source
location into destination memory, which is external to the Linux VM
and is given as a bus address.
/*
 * sep_copy_cache_resident_to_area - load cache and resident firmware
 * @sep: security processor device
 * @src_cache_addr / @cache_size_in_bytes / @src_resident_addr /
 * @resident_size_in_bytes: legacy source description.
 *	NOTE(review): these four parameters are not used in the visible
 *	code path - the images come from request_firmware() instead;
 *	confirm against the callers before removing them.
 * @dst_new_cache_addr_ptr: out - bus address the cache image was placed at
 * @dst_new_resident_addr_ptr: out - bus address of the resident image
 *
 * Fetches "cache.image.bin" and "resident.image.bin" via the kernel
 * firmware loader and copies them back-to-back into the RAR region
 * (cache first, resident immediately after), recording both the
 * virtual and bus addresses in @sep.
 */
static int sep_copy_cache_resident_to_area(struct sep_device *sep,
unsigned long src_cache_addr,
unsigned long cache_size_in_bytes,
unsigned long src_resident_addr,
unsigned long resident_size_in_bytes,
unsigned long *dst_new_cache_addr_ptr,
unsigned long *dst_new_resident_addr_ptr)
const struct firmware *fw;
/* blob names resolved by the firmware loader (usually /lib/firmware) */
char *cache_name = "cache.image.bin";
char *res_name = "resident.image.bin";
/*--------------------------------
-------------------------------------*/
edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
/* the cache image goes at the very start of the RAR region */
sep->rar_region_addr = (unsigned long) sep->rar_addr;
sep->cache_bus = sep->rar_bus;
sep->cache_addr = sep->rar_addr;
/* fetch the cache image from userspace via the firmware loader */
error = request_firmware(&fw, cache_name, &sep->pdev->dev);
edbg("SEP Driver:cant request cache fw\n");
edbg("SEP Driver:cache data loc is %p\n", (void *) fw->data);
edbg("SEP Driver:cache data size is %08Zx\n", fw->size);
/* copy the cache blob into the RAR region */
memcpy(sep->cache_addr, (void *) fw->data, fw->size);
sep->cache_size = fw->size;
cache_addr = sep->cache_addr;
release_firmware(fw);
/* the resident image is placed directly after the cache image */
sep->resident_bus = sep->cache_bus + sep->cache_size;
sep->resident_addr = sep->cache_addr + sep->cache_size;
/* fetch and copy the resident image the same way */
error = request_firmware(&fw, res_name, &sep->pdev->dev);
edbg("SEP Driver:cant request res fw\n");
edbg("SEP Driver:res data loc is %p\n", (void *) fw->data);
edbg("SEP Driver:res data size is %08Zx\n", fw->size);
memcpy((void *) sep->resident_addr, (void *) fw->data, fw->size);
sep->resident_size = fw->size;
release_firmware(fw);
resident_addr = sep->resident_addr;
edbg("SEP Driver:resident_addr (bus)is %08llx\n", (unsigned long long)sep->resident_bus);
edbg("SEP Driver:cache_addr (bus) is %08llx\n", (unsigned long long)sep->cache_bus);
edbg("SEP Driver:resident_addr (virtual)is %p\n", resident_addr);
edbg("SEP Driver:cache_addr (virtual) is %08llx\n", (unsigned long long)cache_addr);
edbg("SEP Driver:resident_size is %08lx\n", sep->resident_size);
edbg("SEP Driver:cache_size is %08llx\n", (unsigned long long)sep->cache_size);
/* report the bus addresses back to the caller */
*dst_new_cache_addr_ptr = sep->cache_bus;
*dst_new_resident_addr_ptr = sep->resident_bus;
267 * sep_map_and_alloc_shared_area - allocate shared block
268 * @sep: security processor
269 * @size: size of shared area
271 * Allocate a shared buffer in host memory that can be used by both the
272 * kernel and also the hardware interface via DMA.
static int sep_map_and_alloc_shared_area(struct sep_device *sep,
/* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
/* one DMA-coherent allocation serves both CPU and SEP; shared_bus is
   the address the hardware will use */
sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
&sep->shared_bus, GFP_KERNEL);
if (!sep->shared_addr) {
edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
/* keep the legacy field names pointing at the same allocation */
sep->shared_area = sep->shared_addr;
/* set the bus address of the shared area */
sep->shared_area_bus = sep->shared_bus;
edbg("sep: shared_area %ld bytes @%p (bus %08llx)\n",
size, sep->shared_addr, (unsigned long long)sep->shared_bus);
295 * sep_unmap_and_free_shared_area - free shared block
296 * @sep: security processor
298 * Free the shared area allocated to the security processor. The
299 * processor must have finished with this and any final posted
300 * writes cleared before we do so.
302 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
304 dma_free_coherent(&sep->pdev->dev, size,
305 sep->shared_area, sep->shared_area_bus);
309 * sep_shared_area_virt_to_bus - convert bus/virt addresses
311 * Returns the bus address inside the shared area according
312 * to the virtual address.
315 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
318 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
319 edbg("sep: virt to bus b %08llx v %p\n", pa, virt_address);
324 * sep_shared_area_bus_to_virt - convert bus/virt addresses
326 * Returns virtual address inside the shared area according
327 * to the bus address.
330 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
331 dma_addr_t bus_address)
333 return sep->shared_addr + (bus_address - sep->shared_bus);
337 /*----------------------------------------------------------------------
338 open function of the character driver - must only lock the mutex
339 must also release the memory data pool allocations
340 ------------------------------------------------------------------------*/
/*
 * sep_open - character device open
 *
 * Takes the single global sep_mutex (trylock for O_NDELAY opens,
 * blocking lock otherwise), binds the one device instance to
 * filp->private_data and resets the data pool allocation counter.
 */
static int sep_open(struct inode *inode, struct file *filp)
dbg("SEP Driver:--------> open start\n");
/* check the blocking mode */
if (filp->f_flags & O_NDELAY)
/* non-blocking open: fail rather than wait for the mutex */
error = mutex_trylock(&sep_mutex);
mutex_lock(&sep_mutex);
/* check the error */
edbg("SEP Driver: down_interruptible failed\n");
/* Bind to the device, we only have one which makes it easy */
filp->private_data = sep_dev;
/* release data pool allocations */
sep_dev->data_pool_bytes_allocated = 0;
dbg("SEP Driver:<-------- open end\n");
376 /*------------------------------------------------------------
378 -------------------------------------------------------------*/
379 static int sep_release(struct inode *inode_ptr, struct file *filp)
381 struct sep_driver *sep = filp->private_data;
382 dbg("----------->SEP Driver: sep_release start\n");
384 #if 0 /*!SEP_DRIVER_POLLING_MODE */
386 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
387 /* release IRQ line */
388 free_irq(SEP_DIRVER_IRQ_NUM, sep);
391 /* unlock the sep mutex */
392 mutex_unlock(&sep_mutex);
393 dbg("SEP Driver:<-------- sep_release end\n");
400 /*---------------------------------------------------------------
401 map function - this functions maps the message shared area
402 -----------------------------------------------------------------*/
403 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
406 struct sep_device *sep = filp->private_data;
408 dbg("-------->SEP Driver: mmap start\n");
410 /* check that the size of the mapped range is as the size of the message
412 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
413 edbg("SEP Driver mmap requested size is more than allowed\n");
414 printk(KERN_WARNING "SEP Driver mmap requested size is more \
416 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
417 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
421 edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
423 /* get bus address */
424 bus_addr = sep->shared_area_bus;
426 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
428 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
429 edbg("SEP Driver remap_page_range failed\n");
430 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
434 dbg("SEP Driver:<-------- mmap end\n");
440 /*-----------------------------------------------
442 *----------------------------------------------*/
/*
 * sep_poll - poll/select entry point
 *
 * In polling mode, spins on GPR2 until the SEP's counter matches
 * send_ct; otherwise sleeps on sep_event.  When send_ct == reply_ct
 * the GPR2 value decides whether this is a SEP request (writable) or
 * a SEP reply (readable), and the poll mask is set accordingly.
 */
static unsigned int sep_poll(struct file *filp, poll_table * wait)
unsigned int mask = 0;
unsigned long retval = 0; /* flow id */
struct sep_device *sep = filp->private_data;
dbg("---------->SEP Driver poll: start\n");
#if SEP_DRIVER_POLLING_MODE
/* busy-wait until the SEP acknowledges our send counter in GPR2 */
while (sep->send_ct != (retval & 0x7FFFFFFF)) {
retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
for (count = 0; count < 10 * 4; count += 4)
edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
/* add the event to the polling wait table */
poll_wait(filp, &sep_event, wait);
edbg("sep->send_ct is %lu\n", sep->send_ct);
edbg("sep->reply_ct is %lu\n", sep->reply_ct);
/* check if the data is ready */
if (sep->send_ct == sep->reply_ct) {
/* dump the first 12 words of the message area for debug */
for (count = 0; count < 12 * 4; count += 4)
edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
for (count = 0; count < 10 * 4; count += 4)
edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));
/* GPR2 carries the flow id; its value distinguishes request/reply */
retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
edbg("retval is %lu\n", retval);
/* check if the this is sep reply or request */
edbg("SEP Driver: sep request in\n");
/* SEP issued a request: the host may write a reply */
mask |= POLLOUT | POLLWRNORM;
edbg("SEP Driver: sep reply in\n");
/* SEP replied: data is ready for the host to read */
mask |= POLLIN | POLLRDNORM;
dbg("SEP Driver:<-------- poll exit\n");
497 calculates time and sets it at the predefined address
/*
 * sep_set_time - write the current wall-clock time into the shared area
 * @sep: security processor device
 * @address_ptr: out (optional) - bus address of the time record
 * @time_in_sec_ptr: out (optional) - the seconds value that was written
 *
 * Stores SEP_TIME_VAL_TOKEN followed by the current tv_sec at the
 * SYSTEM TIME offset of the message shared area so the SEP can read it.
 */
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
/* address of time in the kernel */
dbg("SEP Driver:--------> sep_set_time start\n");
do_gettimeofday(&time);
/* set value in the SYSTEM MEMORY offset */
time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
/* word 0: marker token, word 1: seconds since the epoch */
time_addr[0] = SEP_TIME_VAL_TOKEN;
time_addr[1] = time.tv_sec;
edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
edbg("SEP Driver:time_addr is %p\n", time_addr);
edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
/* set the output parameters if needed */
*address_ptr = sep_shared_area_virt_to_bus(sep, time_addr);
*time_in_sec_ptr = time.tv_sec;
dbg("SEP Driver:<-------- sep_set_time end\n");
533 This function raises interrupt to SEP that signals that is has a new
/*
 * sep_send_command_handler - signal the SEP that a new command is ready
 *
 * Stamps the current time into the shared area, then raises GPR0 to
 * interrupt the SEP.
 */
static void sep_send_command_handler(struct sep_device *sep)
dbg("SEP Driver:--------> sep_send_command_handler start\n");
/* record the host time for the SEP before signalling */
sep_set_time(sep, 0, 0);
/* debug dump of the first 12 words of the outgoing message */
for (count = 0; count < 12 * 4; count += 4)
edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
/* send interrupt to SEP */
sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
dbg("SEP Driver:<-------- sep_send_command_handler end\n");
558 This function raises interrupt to SEPm that signals that is has a
559 new command from HOST
/*
 * sep_send_reply_command_handler - signal the SEP that a reply is ready
 *
 * Writes the send counter into GPR2 to interrupt the SEP with the
 * host's reply.
 */
static void sep_send_reply_command_handler(struct sep_device *sep)
dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
/* debug dump of the first 12 words of the reply message */
for (count = 0; count < 12 * 4; count += 4)
edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
/* send the interrupt to SEP */
sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
/* update both counters */
dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
582 This function handles the allocate data pool memory request
583 This function returns calculates the bus address of the
584 allocated memory, and the offset of this area from the mapped address.
585 Therefore, the FVOs in user space can calculate the exact virtual
586 address of this allocated memory
/*
 * sep_allocate_data_pool_memory_handler - ioctl: carve space from the
 * shared data pool
 *
 * Bump-allocates command_args.num_bytes from the data pool region of
 * the shared area, failing when the pool would overflow, and copies
 * the resulting offset and bus address back to user space.  There is
 * no free operation: the pool is reset only on open.
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
struct sep_driver_alloc_t command_args;
dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
/* allocate memory */
/* reject the request if it would exceed the fixed pool size */
if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
/* set the virtual and bus address */
command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
/* write the memory back to the user space */
error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
/* set the allocation */
sep->data_pool_bytes_allocated += command_args.num_bytes;
dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
624 This function handles write into allocated data pool command
/*
 * sep_write_into_data_pool_handler - ioctl: copy user data into the pool
 *
 * Reads a sep_driver_write_t from user space, range-checks the target
 * kernel-virtual pool address and copies num_bytes of application data
 * into it.  NOTE(review): the range check does not account for
 * address + num_bytes overflowing the pool end - see the FIXME in the
 * read handler below.
 */
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
unsigned long app_in_address;
unsigned long num_bytes;
void *data_pool_area_addr;
dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
/* get the application address */
error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
/* get the virtual kernel address address */
error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
virt_address = (void *)va;
/* get the number of bytes */
error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
/* calculate the start of the data pool */
data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
/* check that the range of the virtual kernel address is correct */
if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
/* copy the application data */
error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
670 this function handles the read from data pool command
/*
 * sep_read_from_data_pool_handler - ioctl: copy pool data to user space
 *
 * Mirror of the write handler: fetches the destination user address,
 * the pool source address and the byte count from the user's
 * sep_driver_write_t, range-checks the pool address and copies the
 * data out with copy_to_user().
 */
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
/* virtual address of dest application buffer */
unsigned long app_out_address;
/* virtual address of the data pool */
unsigned long num_bytes;
void *data_pool_area_addr;
dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
/* get the application address */
error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
/* get the virtual kernel address address */
error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
virt_address = (void *)va;
/* get the number of bytes */
error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
/* calculate the start of the data pool */
data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
/* FIXME: These are incomplete all over the driver: what about + len
   and when doing that also overflows */
/* check that the range of the virtual kernel address is correct */
if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
/* copy the application data */
error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
720 This function releases all the application virtual buffer physical pages,
721 that were previously locked
/*
 * sep_free_dma_pages - release pages pinned by sep_lock_user_pages
 * @page_array_ptr: array of pinned pages (freed here as well)
 * @num_pages: number of entries in the array
 * @dirtyFlag: nonzero when the pages were written by the device and
 *	must be marked dirty before release
 *
 * Drops the page references taken by get_user_pages(), setting the
 * dirty bit first when the buffer was an output buffer, then frees
 * the page array itself.
 */
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
for (count = 0; count < num_pages; count++) {
/* the out array was written, therefore the data was changed */
if (!PageReserved(page_array_ptr[count]))
SetPageDirty(page_array_ptr[count]);
page_cache_release(page_array_ptr[count]);
/* free in pages - the data was only read, therefore no update was done
 */
for (count = 0; count < num_pages; count++)
page_cache_release(page_array_ptr[count]);
kfree(page_array_ptr);
749 This function locks all the physical pages of the kernel virtual buffer
750 and construct a basic lli array, where each entry holds the physical
751 page address and the size that application data holds in this physical pages
/*
 * sep_lock_kernel_pages - build an LLI array for a kernel-virtual buffer
 * @sep: security processor device
 * @kernel_virt_addr: start of the kernel buffer
 * @data_size: buffer length in bytes
 * @num_pages_ptr: out - number of pages spanned
 * @lli_array_ptr: out - kmalloc'd array of {physical address, size} pairs
 * @page_array_ptr: unused here (kernel pages need no pinning)
 *
 * Kernel memory is already resident, so no pages are locked; the
 * function only walks the buffer page by page with virt_to_phys() and
 * records each page's physical address and the number of buffer bytes
 * it holds (first and last pages may be partial).
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
unsigned long kernel_virt_addr,
unsigned long data_size,
unsigned long *num_pages_ptr,
struct sep_lli_entry_t **lli_array_ptr,
struct page ***page_array_ptr)
/* the the page of the end address of the user space buffer */
unsigned long end_page;
/* the page of the start address of the user space buffer */
unsigned long start_page;
/* the range in pages */
unsigned long num_pages;
struct sep_lli_entry_t *lli_array;
/* next kernel address to map */
unsigned long next_kernel_address;
dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
/* set start and end pages and num pages */
end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
start_page = kernel_virt_addr >> PAGE_SHIFT;
num_pages = end_page - start_page + 1;
edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
edbg("SEP Driver: data_size is %lu\n", data_size);
edbg("SEP Driver: start_page is %lx\n", start_page);
edbg("SEP Driver: end_page is %lx\n", end_page);
edbg("SEP Driver: num_pages is %lu\n", num_pages);
/* one LLI entry per page spanned by the buffer */
lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
edbg("SEP Driver: kmalloc for lli_array failed\n");
/* set the start address of the first page - app data may start not at
   the beginning of the page */
lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
/* check that not all the data is in the first page only */
if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
lli_array[0].block_size = data_size;
lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
/* advance the address to the start of the next page */
next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
/* go from the second page to the prev before last */
for (count = 1; count < (num_pages - 1); count++) {
/* interior pages are fully covered by the buffer */
lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
lli_array[count].block_size = PAGE_SIZE;
edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
next_kernel_address += PAGE_SIZE;
/* if more then 1 pages locked - then update for the last page size needed */
/* update the address of the last page */
lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
/* set the size of the last page */
lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
/* a zero remainder means the buffer ends exactly on a page boundary */
if (lli_array[count].block_size == 0) {
dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
dbg("data_size is %lu\n", data_size);
edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
/* set output params */
*lli_array_ptr = lli_array;
*num_pages_ptr = num_pages;
dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
843 This function locks all the physical pages of the application virtual buffer
844 and construct a basic lli array, where each entry holds the physical page
845 address and the size that application data holds in this physical pages
/*
 * sep_lock_user_pages - pin a user buffer and build its LLI array
 * @sep: security processor device
 * @app_virt_addr: start of the user-space buffer
 * @data_size: buffer length in bytes
 * @num_pages_ptr: out - number of pages pinned
 * @lli_array_ptr: out - kmalloc'd array of {physical address, size} pairs
 * @page_array_ptr: out - kmalloc'd array of pinned struct page pointers
 *
 * Pins all pages of the user buffer with get_user_pages() (write
 * access requested), flushes the dcache for each page, and fills one
 * LLI entry per page.  First and last entries may describe partial
 * pages.  On failure the pages pinned so far are released.
 */
static int sep_lock_user_pages(struct sep_device *sep,
unsigned long app_virt_addr,
unsigned long data_size,
unsigned long *num_pages_ptr,
struct sep_lli_entry_t **lli_array_ptr,
struct page ***page_array_ptr)
/* the the page of the end address of the user space buffer */
unsigned long end_page;
/* the page of the start address of the user space buffer */
unsigned long start_page;
/* the range in pages */
unsigned long num_pages;
struct page **page_array;
struct sep_lli_entry_t *lli_array;
dbg("SEP Driver:--------> sep_lock_user_pages start\n");
/* set start and end pages and num pages */
end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
start_page = app_virt_addr >> PAGE_SHIFT;
num_pages = end_page - start_page + 1;
edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
edbg("SEP Driver: data_size is %lu\n", data_size);
edbg("SEP Driver: start_page is %lu\n", start_page);
edbg("SEP Driver: end_page is %lu\n", end_page);
edbg("SEP Driver: num_pages is %lu\n", num_pages);
/* allocate array of pages structure pointers */
page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
edbg("SEP Driver: kmalloc for page_array failed\n");
lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
edbg("SEP Driver: kmalloc for lli_array failed\n");
goto end_function_with_error1;
/* convert the application virtual address into a set of physical */
down_read(&current->mm->mmap_sem);
/* write=1, force=0: pin the pages for device read/write */
result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
up_read(&current->mm->mmap_sem);
/* check the number of pages locked - if not all then exit with error */
if (result != num_pages) {
dbg("SEP Driver: not all pages locked by get_user_pages\n");
goto end_function_with_error2;
/* flush the cache */
for (count = 0; count < num_pages; count++)
flush_dcache_page(page_array[count]);
/* set the start address of the first page - app data may start not at
   the beginning of the page */
lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
/* check that not all the data is in the first page only */
if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
lli_array[0].block_size = data_size;
lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
/* go from the second page to the prev before last */
for (count = 1; count < (num_pages - 1); count++) {
/* interior pages are fully covered by the buffer */
lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
lli_array[count].block_size = PAGE_SIZE;
edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
/* if more then 1 pages locked - then update for the last page size needed */
/* update the address of the last page */
lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
/* set the size of the last page */
lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
/* a zero remainder means the buffer ends exactly on a page boundary */
if (lli_array[count].block_size == 0) {
dbg("app_virt_addr is %08lx\n", app_virt_addr);
dbg("data_size is %lu\n", data_size);
edbg("lli_array[%lu].physical_address is %08lx, \
lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
/* set output params */
*lli_array_ptr = lli_array;
*num_pages_ptr = num_pages;
*page_array_ptr = page_array;
/* error unwind: drop pinned pages, then the arrays */
end_function_with_error2:
/* release the cache */
for (count = 0; count < num_pages; count++)
page_cache_release(page_array[count]);
end_function_with_error1:
dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
971 this function calculates the size of data that can be inserted into the lli
972 table from this array the condition is that either the table is full
973 (all etnries are entered), or there are no more entries in the lli array
975 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
977 unsigned long table_data_size = 0;
978 unsigned long counter;
980 /* calculate the data in the out lli table if till we fill the whole
981 table or till the data has ended */
982 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
983 table_data_size += lli_in_array_ptr[counter].block_size;
984 return table_data_size;
988 this functions builds ont lli table from the lli_array according to
989 the given size of data
/*
 * sep_build_lli_table - fill one hardware LLI table from the LLI array
 * @lli_array_ptr: source array of {physical address, size} entries;
 *	partially-consumed entries are updated in place
 * @lli_table_ptr: destination table to fill
 * @num_processed_entries_ptr: in/out - count of array entries consumed
 * @num_table_entries_ptr: out - entries written, including the info entry
 * @table_data_size: exact number of bytes this table must describe
 *
 * Copies entries until table_data_size bytes are covered.  If the last
 * entry overshoots, its table copy is trimmed and the array entry is
 * adjusted so the remainder is picked up by the next table.  The table
 * is terminated by an info entry (address 0xffffffff, size 0).
 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
unsigned long curr_table_data_size;
/* counter of lli array entry */
unsigned long array_counter;
dbg("SEP Driver:--------> sep_build_lli_table start\n");
/* init currrent table data size and lli array entry counter */
curr_table_data_size = 0;
/* the table always ends with one info entry */
*num_table_entries_ptr = 1;
edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
/* fill the table till table size reaches the needed amount */
while (curr_table_data_size < table_data_size) {
/* update the number of entries in table */
(*num_table_entries_ptr)++;
lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
curr_table_data_size += lli_table_ptr->block_size;
edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
/* check for overflow of the table data */
if (curr_table_data_size > table_data_size) {
edbg("SEP Driver:curr_table_data_size > table_data_size\n");
/* update the size of block in the table */
lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
/* update the physical address in the lli array */
lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
/* update the block size left in the lli array */
lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
/* advance to the next entry in the lli_array */
edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
/* move to the next entry in table */
/* set the info entry to default */
lli_table_ptr->physical_address = 0xffffffff;
lli_table_ptr->block_size = 0;
edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
/* set the output parameter */
*num_processed_entries_ptr += array_counter;
edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1059 this function goes over the list of the print created tables and
/*
 * sep_debug_print_lli_tables - dump a chain of LLI tables to the log
 *
 * Walks the linked tables starting at @lli_table_ptr until the
 * 0xffffffff terminator, printing every entry.  Each table's info
 * entry packs the next table's size (low 24 bits of block_size), its
 * entry count (bits 24-31) and its bus address (physical_address),
 * which is converted back to a kernel virtual pointer for the next
 * iteration.  Debug-only helper; has no side effects on the tables.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
unsigned long table_count;
unsigned long entries_count;
dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
/* 0xffffffff in the pointer marks the end of the chain */
while ((unsigned long) lli_table_ptr != 0xffffffff) {
edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
/* print entries of the table (without info entry) */
for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
/* point to the info entry */
edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
/* unpack the next table's metadata from the info entry */
table_data_size = lli_table_ptr->block_size & 0xffffff;
num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
lli_table_ptr = (struct sep_lli_entry_t *)
(lli_table_ptr->physical_address);
edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
/* the stored pointer is a bus address - map it back to virtual */
if ((unsigned long) lli_table_ptr != 0xffffffff)
lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_bus_to_virt(sep, (unsigned long) lli_table_ptr);
dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1104 This function prepares only input DMA table for synchronous symmetric
/*
 * sep_prepare_input_dma_table - build the input-side LLI/DMA table chain in
 * the shared area for a synchronous symmetric operation.
 * Locks the caller's buffer pages (kernel or user addressed, selected by
 * @isKernelVirtualAddress), then packs the resulting LLI array into tables
 * of at most SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP entries, each table's data
 * size rounded down to a multiple of @block_size.  Outputs: bus address of
 * the first table, its entry count, and its data size.
 *
 * FIX: the third argument of sep_build_lli_table() below had been corrupted
 * by a character-encoding error ("&current_entry" rendered as the currency
 * sign followed by "t_entry"); the original token is restored.
 */
1107 static int sep_prepare_input_dma_table(struct sep_device *sep,
1108 unsigned long app_virt_addr,
1109 unsigned long data_size,
1110 unsigned long block_size,
1111 unsigned long *lli_table_ptr,
1112 unsigned long *num_entries_ptr,
1113 unsigned long *table_data_size_ptr,
1114 bool isKernelVirtualAddress)
1116 /* pointer to the info entry of the table - the last entry */
1117 struct sep_lli_entry_t *info_entry_ptr;
1118 /* array of pointers to page */
1119 struct sep_lli_entry_t *lli_array_ptr;
1120 /* points to the first entry to be processed in the lli_in_array */
1121 unsigned long current_entry;
1122 /* num entries in the virtual buffer */
1123 unsigned long sep_lli_entries;
1124 /* lli table pointer */
1125 struct sep_lli_entry_t *in_lli_table_ptr;
1126 /* the total data in one table */
1127 unsigned long table_data_size;
1128 /* number of entries in lli table */
1129 unsigned long num_entries_in_table;
1130 /* next table address */
1131 void *lli_table_alloc_addr;
1132 unsigned long result;
1134 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1136 edbg("SEP Driver:data_size is %lu\n", data_size);
1137 edbg("SEP Driver:block_size is %lu\n", block_size);
1139 /* initialize the pages pointers */
1140 sep->in_page_array = 0;
1141 sep->in_num_pages = 0;
1143 if (data_size == 0) {
1144 /* special case - created 2 entries table with zero data */
1145 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1146 /* FIXME: Should the entry below not be for _bus */
1147 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1148 in_lli_table_ptr->block_size = 0;
1151 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1152 in_lli_table_ptr->block_size = 0;
1154 *lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1155 *num_entries_ptr = 2;
1156 *table_data_size_ptr = 0;
1161 /* check if the pages are in Kernel Virtual Address layout */
1162 if (isKernelVirtualAddress == true)
1163 /* lock the pages of the kernel buffer and translate them to pages */
1164 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1166 /* lock the pages of the user buffer and translate them to pages */
1167 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1172 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1176 sep_lli_entries = sep->in_num_pages;
1178 /* initiate to point after the message area */
1179 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
/* NOTE(review): current_entry and info_entry_ptr must be zeroed before this
   loop; their initialization is not visible in this elided listing - confirm
   against the full source. */
1181 /* loop till all the entries in in array are not processed */
1182 while (current_entry < sep_lli_entries) {
1183 /* set the new input and output tables */
1184 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1186 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1188 /* calculate the maximum size of data for input table */
1189 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1191 /* now calculate the table size so that it will be module block size */
1192 table_data_size = (table_data_size / block_size) * block_size;
1194 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1196 /* construct input lli table */
1197 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1199 if (info_entry_ptr == 0) {
1200 /* set the output parameters to physical addresses */
1201 *lli_table_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1202 *num_entries_ptr = num_entries_in_table;
1203 *table_data_size_ptr = table_data_size;
1205 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1207 /* update the info entry of the previous in table */
1208 info_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1209 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1212 /* save the pointer to the info entry of the current tables */
1213 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1216 /* print input tables */
1217 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1218 sep_shared_area_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1220 /* free the array of the pages */
1221 kfree(lli_array_ptr);
1223 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1229 This function creates the input and output dma tables for
1230 symmetric operations (AES/DES) according to the block size from LLI arrays
/*
 * sep_construct_dma_tables_from_lli - build paired input/output LLI table
 * chains in the shared area from two locked-page LLI arrays.  Each loop
 * iteration allocates one input and one output table, sizes both to the
 * smaller of the two remaining data spans rounded down to a multiple of
 * @block_size, and links the previous tables' info entries to the new ones.
 * Outputs: bus addresses, entry counts and data size of the first tables.
 *
 * FIX: the third argument of both sep_build_lli_table() calls below had
 * been corrupted by a character-encoding error ("&current_in_entry" /
 * "&current_out_entry" rendered with a currency sign); original tokens
 * restored.
 */
1232 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1233 struct sep_lli_entry_t *lli_in_array,
1234 unsigned long sep_in_lli_entries,
1235 struct sep_lli_entry_t *lli_out_array,
1236 unsigned long sep_out_lli_entries,
1237 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1239 /* points to the area where next lli table can be allocated: keep void *
1240 as there is pointer scaling to fix otherwise */
1241 void *lli_table_alloc_addr;
1242 /* input lli table */
1243 struct sep_lli_entry_t *in_lli_table_ptr;
1244 /* output lli table */
1245 struct sep_lli_entry_t *out_lli_table_ptr;
1246 /* pointer to the info entry of the table - the last entry */
1247 struct sep_lli_entry_t *info_in_entry_ptr;
1248 /* pointer to the info entry of the table - the last entry */
1249 struct sep_lli_entry_t *info_out_entry_ptr;
1250 /* points to the first entry to be processed in the lli_in_array */
1251 unsigned long current_in_entry;
1252 /* points to the first entry to be processed in the lli_out_array */
1253 unsigned long current_out_entry;
1254 /* max size of the input table */
1255 unsigned long in_table_data_size;
1256 /* max size of the output table */
1257 unsigned long out_table_data_size;
1258 /* flag that signifies if this is the first tables built from the arrays */
1259 unsigned long first_table_flag;
1260 /* the data size that should be in table */
1261 unsigned long table_data_size;
1262 /* number of entries in the input table */
1263 unsigned long num_entries_in_table;
1264 /* number of entries in the output table */
1265 unsigned long num_entries_out_table;
1267 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1269 /* initiate to point after the message area */
1270 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1272 current_in_entry = 0;
1273 current_out_entry = 0;
1274 first_table_flag = 1;
1275 info_in_entry_ptr = 0;
1276 info_out_entry_ptr = 0;
1278 /* loop till all the entries in in array are not processed */
1279 while (current_in_entry < sep_in_lli_entries) {
1280 /* set the new input and output tables */
1281 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1283 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1285 /* set the first output tables */
1286 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1288 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1290 /* calculate the maximum size of data for input table */
1291 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1293 /* calculate the maximum size of data for output table */
1294 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1296 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1297 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1299 /* check where the data is smallest */
1300 table_data_size = in_table_data_size;
1301 if (table_data_size > out_table_data_size)
1302 table_data_size = out_table_data_size;
1304 /* now calculate the table size so that it will be module block size */
1305 table_data_size = (table_data_size / block_size) * block_size;
1307 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1309 /* construct input lli table */
1310 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1312 /* construct output lli table */
1313 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1315 /* if info entry is null - this is the first table built */
1316 if (info_in_entry_ptr == 0) {
1317 /* set the output parameters to physical addresses */
1318 *lli_table_in_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1319 *in_num_entries_ptr = num_entries_in_table;
1320 *lli_table_out_ptr = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1321 *out_num_entries_ptr = num_entries_out_table;
1322 *table_data_size_ptr = table_data_size;
1324 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1325 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1327 /* update the info entry of the previous in table */
1328 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1329 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1331 /* update the info entry of the previous out table */
1332 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1333 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1336 /* save the pointer to the info entry of the current tables */
1337 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1338 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1340 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1341 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1342 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1345 /* print input tables */
1346 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1347 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1348 /* print output tables */
1349 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1350 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1351 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1357 This function builds input and output DMA tables for synchronous
1358 symmetric operations (AES, DES). It also checks that each table
1359 is of the modular block size
/*
 * sep_prepare_input_output_dma_table - lock the input and output buffers
 * (kernel- or user-addressed per @isKernelVirtualAddress), then hand the
 * resulting LLI arrays to sep_construct_dma_tables_from_lli() to build the
 * paired table chains.  The temporary LLI arrays are freed on all paths via
 * the goto-cleanup labels below.
 */
1361 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1362 unsigned long app_virt_in_addr,
1363 unsigned long app_virt_out_addr,
1364 unsigned long data_size,
1365 unsigned long block_size,
1366 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1368 /* array of pointers of page */
1369 struct sep_lli_entry_t *lli_in_array;
1370 /* array of pointers of page */
1371 struct sep_lli_entry_t *lli_out_array;
1374 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1376 /* initialize the pages pointers */
1377 sep->in_page_array = 0;
1378 sep->out_page_array = 0;
1380 /* check if the pages are in Kernel Virtual Address layout */
1381 if (isKernelVirtualAddress == true) {
1382 /* lock the pages of the kernel buffer and translate them to pages */
1383 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1385 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1389 /* lock the pages of the user buffer and translate them to pages */
1390 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1392 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* now lock the output buffer the same way */
1397 if (isKernelVirtualAddress == true) {
1398 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1400 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1401 goto end_function_with_error1;
1404 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1406 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1407 goto end_function_with_error1;
1410 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1411 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1412 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1415 /* call the function that creates table from the lli arrays */
1416 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1418 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1419 goto end_function_with_error2;
1422 /* fall through - free the lli entry arrays */
1423 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1424 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1425 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1426 end_function_with_error2:
1427 kfree(lli_out_array);
1428 end_function_with_error1:
1429 kfree(lli_in_array);
1431 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1437 this function handles the request for creation of the DMA table
1438 for the synchronous symmetric operations (AES,DES)
/*
 * ioctl handler: copies a sep_driver_build_sync_table_t from user space,
 * builds either an input-only table chain (no output address supplied) or
 * paired input/output chains, then copies the filled command struct (table
 * bus addresses, entry counts, data size) back to user space.
 */
1440 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1444 /* command arguments */
1445 struct sep_driver_build_sync_table_t command_args;
1447 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1449 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1453 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1454 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1455 edbg("data_size is %lu\n", command_args.data_in_size);
1456 edbg("block_size is %lu\n", command_args.block_size);
1458 /* check if we need to build only input table or input/output */
1459 if (command_args.app_out_address)
1460 /* prepare input and output tables */
1461 error = sep_prepare_input_output_dma_table(sep,
1462 command_args.app_in_address,
1463 command_args.app_out_address,
1464 command_args.data_in_size,
1465 command_args.block_size,
1466 &command_args.in_table_address,
1467 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1469 /* prepare input tables */
1470 error = sep_prepare_input_dma_table(sep,
1471 command_args.app_in_address,
1472 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* return the filled-in table descriptors to user space */
1477 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1480 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1485 this function handles the request for freeing dma table for synchronous actions
/*
 * Releases the page arrays pinned by a previous sync DMA table build:
 * input pages are freed without dirtying (flag 0), output pages are marked
 * dirty before release (flag 1) since the device wrote into them, then all
 * bookkeeping fields in *sep are reset.
 */
1487 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1489 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1491 /* free input pages array */
1492 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1494 /* free output pages array if needed */
1495 if (sep->out_page_array)
1496 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1498 /* reset all the values */
1499 sep->in_page_array = 0;
1500 sep->out_page_array = 0;
1501 sep->in_num_pages = 0;
1502 sep->out_num_pages = 0;
1503 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1508 this function finds a space for the new flow dma table
/*
 * Scans the flow-DMA-tables region of the shared area in fixed-size slots
 * (table_size_in_words apiece) for a slot whose first word is free (low 31
 * bits zero), and returns a pointer to it through @table_address_ptr.
 * NOTE(review): the failure return when the area is exhausted is elided
 * from this listing - confirm the error path in the full source.
 */
1510 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1511 unsigned long **table_address_ptr)
1514 /* pointer to the id field of the flow dma table */
1515 unsigned long *start_table_ptr;
1516 /* Do not make start_addr unsigned long * unless fixing the offset
1518 void *flow_dma_area_start_addr;
1519 unsigned long *flow_dma_area_end_addr;
1520 /* maximum table size in words */
1521 unsigned long table_size_in_words;
1523 /* find the start address of the flow DMA table area */
1524 flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1526 /* set end address of the flow table area */
1527 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1529 /* set table size in words: entries plus the 2 header words (page count
1530 and page-array pointer) */
1530 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1532 /* set the pointer to the start address of DMA area */
1533 start_table_ptr = flow_dma_area_start_addr;
1535 /* find the space for the next table */
1536 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1537 start_table_ptr += table_size_in_words;
1539 /* check if we reached the end of flow tables area */
1540 if (start_table_ptr >= flow_dma_area_end_addr)
1543 *table_address_ptr = start_table_ptr;
1549 This function creates one DMA table for flow and returns its data,
1550 and pointer to its info entry
/*
 * Builds a single flow DMA table in the shared-area flow region for one
 * virtual buffer.  Layout of the claimed slot: word 0 = page count (marks
 * the slot taken), word 1 = pointer to the pinned page array, then the LLI
 * entries, then the info entry (physical_address 0xffffffff, block_size 0).
 * @table_data receives the table's physical address plus entry-count/size
 * packed into block_size; @info_entry_ptr receives the info entry address.
 */
1552 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1553 unsigned long virt_buff_addr,
1554 unsigned long virt_buff_size,
1555 struct sep_lli_entry_t *table_data,
1556 struct sep_lli_entry_t **info_entry_ptr,
1557 struct sep_flow_context_t *flow_data_ptr,
1558 bool isKernelVirtualAddress)
1561 /* the range in pages */
1562 unsigned long lli_array_size;
1563 struct sep_lli_entry_t *lli_array;
1564 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1565 unsigned long *start_dma_table_ptr;
1566 /* total table data counter */
1567 unsigned long dma_table_data_count;
1568 /* pointer that will keep the pointer to the pages of the virtual buffer */
1569 struct page **page_array_ptr;
1570 unsigned long entry_count;
1572 /* find the space for the new table */
1573 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1577 /* check if the pages are in Kernel Virtual Address layout */
1578 if (isKernelVirtualAddress == true)
1579 /* lock kernel buffer in the memory */
1580 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1582 /* lock user buffer in the memory */
1583 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1588 /* set the pointer to page array at the beginning of table - this table is
1589 now considered taken */
1590 *start_dma_table_ptr = lli_array_size;
1592 /* point to the place of the pages pointers of the table */
1593 start_dma_table_ptr++;
1595 /* set the pages pointer */
1596 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1598 /* set the pointer to the first entry */
1599 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1601 /* now create the entries for table */
1602 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1603 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1605 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1607 /* set the total data of a table */
1608 dma_table_data_count += lli_array[entry_count].block_size;
1610 flow_dma_table_entry_ptr++;
1613 /* set the physical address */
1614 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1616 /* set the num_entries and total data size */
1617 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1619 /* set the info entry */
1620 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1621 flow_dma_table_entry_ptr->block_size = 0;
1623 /* set the pointer to info entry */
1624 *info_entry_ptr = flow_dma_table_entry_ptr;
1626 /* free the array of the lli entries */
1635 This function creates a list of tables for flow and returns the data for
1636 the first and last tables of the list
/*
 * Reads num_virtual_buffers (address,size) pairs from the user-space array
 * at @first_buff_addr, builds one flow DMA table per buffer via
 * sep_prepare_one_flow_dma_table(), and chains them by writing each table's
 * descriptor into the previous table's info entry (with the interrupt flag
 * bit set).  Returns the first and last tables' descriptors to the caller.
 */
1638 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1639 unsigned long num_virtual_buffers,
1640 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1643 unsigned long virt_buff_addr;
1644 unsigned long virt_buff_size;
1645 struct sep_lli_entry_t table_data;
1646 struct sep_lli_entry_t *info_entry_ptr;
1647 struct sep_lli_entry_t *prev_info_entry_ptr;
1652 prev_info_entry_ptr = 0;
1654 /* init the first table to default */
1655 table_data.physical_address = 0xffffffff;
1656 first_table_data_ptr->physical_address = 0xffffffff;
1657 table_data.block_size = 0;
1659 for (i = 0; i < num_virtual_buffers; i++) {
1660 /* get the virtual buffer address */
1661 error = get_user(virt_buff_addr, &first_buff_addr);
1665 /* get the virtual buffer size */
1667 error = get_user(virt_buff_size, &first_buff_addr);
1671 /* advance the address to point to the next pair of address|size */
1674 /* now prepare the one flow LLI table from the data */
1675 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1680 /* if this is the first table - save it to return to the user
1682 *first_table_data_ptr = table_data;
1684 /* set the pointer to info entry */
1685 prev_info_entry_ptr = info_entry_ptr;
1687 /* not first table - the previous table info entry should
1689 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1691 /* set the pointer to info entry */
1692 prev_info_entry_ptr = info_entry_ptr;
1696 /* set the last table data */
1697 *last_table_data_ptr = table_data;
1703 this function goes over all the flow tables connected to the given
1704 table and deallocates them
/*
 * Walks the chain of flow DMA tables starting at @first_table_ptr and
 * releases the pinned pages of each table.  Each table stores its page
 * count two words and its page-array pointer one word before the first LLI
 * entry (see sep_prepare_one_flow_dma_table); the info entry of each table
 * points to the next, with 0xffffffff as the chain terminator.
 */
1706 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1709 unsigned long *table_ptr;
1710 /* end address of the flow dma area */
1711 unsigned long num_entries;
1712 unsigned long num_pages;
1713 struct page **pages_ptr;
1714 /* maximum table size in words */
1715 struct sep_lli_entry_t *info_entry_ptr;
1717 /* set the pointer to the first table */
1718 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1720 /* set the num of entries */
1721 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1722 & SEP_NUM_ENTRIES_MASK;
1724 /* go over all the connected tables */
1725 while (*table_ptr != 0xffffffff) {
1726 /* get number of pages */
1727 num_pages = *(table_ptr - 2);
1729 /* get the pointer to the pages */
1730 pages_ptr = (struct page **) (*(table_ptr - 1));
1732 /* free the pages (mark dirty - flag 1) */
1733 sep_free_dma_pages(pages_ptr, num_pages, 1);
1735 /* goto to the info entry */
1736 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the info entry to the next table in the chain */
1738 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1739 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1746 * sep_find_flow_context - find a flow
1747 * @sep: the SEP we are working with
1748 * @flow_id: flow identifier
1750 * Returns a pointer to the matching flow, or NULL if the flow does not
/* Linear scan over sep->flows[0..SEP_DRIVER_NUM_FLOWS-1] for a matching id. */
1754 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1755 unsigned long flow_id)
1759 * always search for flow with id default first - in case we
1760 * already started working on the flow there can be no situation
1761 * when 2 flows are with default flag
1763 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1764 if (sep->flows[count].flow_id == flow_id)
1765 return &sep->flows[count];
1772 this function handles the request to create the DMA tables for flow
/*
 * ioctl handler: claims a free flow context (SEP_FREE_FLOW_ID), copies the
 * user's sep_driver_build_flow_table_t, builds the flow table chain, copies
 * the first table's descriptor back to user space, then marks the context
 * with SEP_TEMP_FLOW_ID and records the first table as the in-process
 * input or output table chain.  On error the built tables are freed.
 */
1774 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1778 struct sep_driver_build_flow_table_t command_args;
1779 /* first table - output */
1780 struct sep_lli_entry_t first_table_data;
1781 /* dma table data */
1782 struct sep_lli_entry_t last_table_data;
1783 /* pointer to the info entry of the previous DMA table */
1784 struct sep_lli_entry_t *prev_info_entry_ptr;
1785 /* pointer to the flow data structure */
1786 struct sep_flow_context_t *flow_context_ptr;
1788 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1790 /* init variables */
1791 prev_info_entry_ptr = 0;
1792 first_table_data.physical_address = 0xffffffff;
1794 /* find the free structure for flow data */
1795 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1796 if (flow_context_ptr == NULL)
1799 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1803 /* create flow tables */
1804 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1806 goto end_function_with_error;
1808 /* check if flow is static */
1809 if (!command_args.flow_type)
1810 /* point the info entry of the last to the info entry of the first */
1811 last_table_data = first_table_data;
1813 /* set output params */
1814 command_args.first_table_addr = first_table_data.physical_address;
1815 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1816 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1818 /* send the parameters to user application */
1819 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1821 goto end_function_with_error;
1823 /* all the flow created - update the flow entry with temp id */
1824 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1826 /* set the processing tables data in the context */
1827 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1828 flow_context_ptr->input_tables_in_process = first_table_data;
1830 flow_context_ptr->output_tables_in_process = first_table_data;
1834 end_function_with_error:
1835 /* free the allocated tables */
1836 sep_deallocated_flow_tables(&first_table_data);
1838 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1843 this function handles add tables to flow
/*
 * ioctl handler: builds a new chain of flow DMA tables and appends it to an
 * existing flow.  If the flow already has tables on the chosen side
 * (input or output, per inputOutputFlag), the new chain is linked into the
 * previous last table's info entry; otherwise the new chain becomes the
 * flow's first chain.  The first table's descriptor is returned to user
 * space in the command struct.
 */
1845 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1848 unsigned long num_entries;
1849 struct sep_driver_add_flow_table_t command_args;
1850 struct sep_flow_context_t *flow_context_ptr;
1851 /* first dma table data */
1852 struct sep_lli_entry_t first_table_data;
1853 /* last dma table data */
1854 struct sep_lli_entry_t last_table_data;
1855 /* pointer to the info entry of the current DMA table */
1856 struct sep_lli_entry_t *info_entry_ptr;
1858 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1860 /* get input parameters */
1861 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1865 /* find the flow structure for the flow id */
1866 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1867 if (flow_context_ptr == NULL)
1870 /* prepare the flow dma tables */
1871 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1873 goto end_function_with_error;
1875 /* now check if there is already an existing add table for this flow */
1876 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1877 /* this buffer was for input buffers */
1878 if (flow_context_ptr->input_tables_flag) {
1879 /* add table already exists - add the new tables to the end
1881 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* locate the info entry (last entry) of the previous last input table */
1883 info_entry_ptr = (struct sep_lli_entry_t *)
1884 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1886 /* connect to list of tables */
1887 *info_entry_ptr = first_table_data;
1889 /* set the first table data */
1890 first_table_data = flow_context_ptr->first_input_table;
1892 /* set the input flag */
1893 flow_context_ptr->input_tables_flag = 1;
1895 /* set the first table data */
1896 flow_context_ptr->first_input_table = first_table_data;
1898 /* set the last table data */
1899 flow_context_ptr->last_input_table = last_table_data;
1900 } else { /* this is output tables */
1902 /* this buffer was for output buffers */
1903 if (flow_context_ptr->output_tables_flag) {
1904 /* add table already exists - add the new tables to
1905 the end of the previous */
1906 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* locate the info entry (last entry) of the previous last output table */
1908 info_entry_ptr = (struct sep_lli_entry_t *)
1909 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1911 /* connect to list of tables */
1912 *info_entry_ptr = first_table_data;
1914 /* set the first table data */
1915 first_table_data = flow_context_ptr->first_output_table;
1917 /* set the output flag */
1918 flow_context_ptr->output_tables_flag = 1;
1920 /* set the first table data */
1921 flow_context_ptr->first_output_table = first_table_data;
1923 /* set the last table data */
1924 flow_context_ptr->last_output_table = last_table_data;
1927 /* set output params */
1928 command_args.first_table_addr = first_table_data.physical_address;
1929 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1930 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1932 /* send the parameters to user application */
1933 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1934 end_function_with_error:
1935 /* free the allocated tables */
1936 sep_deallocated_flow_tables(&first_table_data);
1938 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1943 this function adds the flow add message to the specific flow
/*
 * ioctl handler: copies a sep_driver_add_message_t from user space,
 * validates the message length against SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES,
 * looks up the target flow by id, and copies the message payload from user
 * space into the flow context.
 */
1945 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1948 struct sep_driver_add_message_t command_args;
1949 struct sep_flow_context_t *flow_context_ptr;
1951 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1953 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject oversized messages before touching the flow context */
1958 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1963 /* find the flow context */
1964 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1965 if (flow_context_ptr == NULL)
1968 /* copy the message into context */
1969 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1970 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1972 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1978 this function returns the bus and virtual addresses of the static pool
/*
 * sep_get_static_pool_addr_handler - report the bus and kernel-virtual
 * addresses of the static area inside the shared region to user space.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_static_pool_addr_t
 *
 * NOTE(review): this hands a raw kernel virtual address to user space
 * (virtual_static_address) - an information leak by modern standards;
 * user space should only ever see an offset into its own mmap.
 */
1980 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1983 struct sep_driver_static_pool_addr_t command_args;
1985 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1987 /*prepare the output parameters in the struct */
/* static area lives at a fixed offset from the start of the shared region */
1988 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1989 command_args.virtual_static_address = (unsigned long)sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1991 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1993 /* send the parameters to user application */
1994 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1995 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2000 this address gets the offset of the physical address from the start
/*
 * sep_get_physical_mapped_offset_handler - translate a bus address supplied
 * by user space into an offset from the start of the shared area.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_get_mapped_offset_t
 *
 * NOTE(review): only the lower bound (address < shared_area_bus) is checked
 * in the visible code; an upper-bound check against the shared-area size is
 * not visible here - confirm it exists, otherwise out-of-range offsets leak.
 */
2003 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2006 struct sep_driver_get_mapped_offset_t command_args;
2008 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2010 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
/* reject addresses below the shared region (would underflow the offset) */
2014 if (command_args.physical_address < sep->shared_area_bus) {
2019 /*prepare the output parameters in the struct */
2020 command_args.offset = command_args.physical_address - sep->shared_area_bus;
2022 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2024 /* send the parameters to user application */
2025 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2027 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * sep_start_handler - handle the SEP_IOCSEPSTART ioctl: poll GPR3 for the
 * SEP firmware's startup acknowledgement and read GPR0 for error status.
 * @sep: device instance
 *
 * NOTE(review): the polling loop body and the value check on reg_val are
 * elided in this chunk; only a single read of GPR3 is visible.
 */
2035 static int sep_start_handler(struct sep_device *sep)
2037 unsigned long reg_val;
2038 unsigned long error = 0;
2040 dbg("SEP Driver:--------> sep_start_handler start\n");
2042 /* wait in polling for message from SEP */
2044 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2047 /* check the value */
2049 /* fatal error - read error status from GPRO */
2050 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2051 dbg("SEP Driver:<-------- sep_start_handler end\n");
2056 this function handles the request for SEP initialization
/*
 * sep_init_handler - handle the SEP_IOCSEPINIT ioctl: copy the user-supplied
 * init message word-by-word into the SEP SRAM, signal the SEP via GPR0, then
 * poll GPR3 for the init acknowledgement / error status.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_init_t
 */
2058 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2060 unsigned long message_word;
2061 unsigned long *message_ptr;
2062 struct sep_driver_init_t command_args;
2063 unsigned long counter;
2064 unsigned long error;
2065 unsigned long reg_val;
2067 dbg("SEP Driver:--------> sep_init_handler start\n");
2070 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2072 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2077 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2078 /*sep_configure_dma_burst(); */
2080 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
/* NOTE(review): message_addr is a raw user pointer later read via get_user;
   no bound on message_size_in_words is visible in this chunk */
2082 message_ptr = (unsigned long *) command_args.message_addr;
2084 /* set the base address of the SRAM */
2085 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2087 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
/* NOTE(review): get_user's return value is ignored - a fault here writes
   an uninitialized message_word into SRAM */
2088 get_user(message_word, message_ptr);
2089 /* write data to SRAM */
2090 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2091 edbg("SEP Driver:message_word is %lu\n", message_word);
2092 /* wait for write complete */
2093 sep_wait_sram_write(sep);
2095 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* kick the SEP: signal that the init message is in SRAM */
2097 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
/* NOTE(review): the enclosing do/while appears elided - as shown, reg_val is
   read once and the while below would spin forever if the bit is clear;
   also this is an unbounded busy-wait with no timeout or cpu_relax() */
2100 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2101 while (!(reg_val & 0xFFFFFFFD));
2103 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2105 /* check the value */
2106 if (reg_val == 0x1) {
2107 edbg("SEP Driver:init failed\n");
/* 0x8060: undocumented "sw monitor" register - magic offset, no symbolic name */
2109 error = sep_read_reg(sep, 0x8060);
2110 edbg("SEP Driver:sw monitor is %lu\n", error);
2112 /* fatal error - read erro status from GPRO */
2113 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2114 edbg("SEP Driver:error is %lu\n", error);
2117 dbg("SEP Driver:<-------- sep_init_handler end\n");
2123 this function handles the request cache and resident reallocation
/*
 * sep_realloc_cache_resident_handler - copy the user-supplied cache and
 * resident firmware images into the shared area and return their new bus
 * addresses (plus the lowest base address) to user space.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_realloc_cache_resident_t
 */
2125 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2129 unsigned long bus_cache_address;
2130 unsigned long bus_resident_address;
2131 struct sep_driver_realloc_cache_resident_t command_args;
2134 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2138 /* copy cache and resident to the their intended locations */
2139 error = sep_copy_cache_resident_to_area(sep, command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &bus_cache_address, &bus_resident_address);
/* start from the shared area and lower it to whichever image sits first */
2143 command_args.new_base_addr = sep->shared_area_bus;
2145 /* find the new base address according to the lowest address between
2146 cache, resident and shared area */
2147 if (bus_resident_address < command_args.new_base_addr)
2148 command_args.new_base_addr = bus_resident_address;
2149 if (bus_cache_address < command_args.new_base_addr)
2150 command_args.new_base_addr = bus_cache_address;
2152 /* set the return parameters */
2153 command_args.new_cache_addr = bus_cache_address;
2154 command_args.new_resident_addr = bus_resident_address;
2156 /* set the new shared area */
2157 command_args.new_shared_area_addr = sep->shared_area_bus;
2159 edbg("SEP Driver:command_args.new_shared_area is %08lx\n", command_args.new_shared_area_addr);
2160 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2161 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2162 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2164 /* return to user */
2165 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2171 this function handles the request for get time
/*
 * sep_get_time_handler - set the current time into the shared area via
 * sep_set_time() and return the time value and its bus address to user space.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_get_time_t
 *
 * NOTE(review): the first 'error' is overwritten by the copy_to_user result
 * before any visible check - a sep_set_time failure may be silently lost.
 */
2173 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2176 struct sep_driver_get_time_t command_args;
2178 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2180 error = copy_to_user((void __user *)arg,
2181 &command_args, sizeof(struct sep_driver_get_time_t));
2187 This API handles the end transaction request
/*
 * sep_end_transaction_handler - handle SEP_IOCENDTRANSACTION: release the
 * transaction by dropping sep_mutex. The IMR masking and free_irq() steps
 * are compiled out (#if 0) - only the mutex release is live.
 * @sep: device instance
 * @arg: unused in the visible code
 */
2189 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2191 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2193 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* dead code: would mask all interrupts and release the IRQ line */
2195 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2197 /* release IRQ line */
2198 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2200 /* release the sep mutex taken when the transaction started */
2201 mutex_unlock(&sep_mutex);
2204 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2211 This function handler the set flow id command
/*
 * sep_set_flow_id_handler - handle SEP_IOCSETFLOWID: assign the user-chosen
 * flow id to the flow context most recently created under SEP_TEMP_FLOW_ID.
 * @sep: device instance
 * @arg: user pointer to a struct sep_driver_set_flow_id_t
 */
2213 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2216 unsigned long flow_id;
2217 struct sep_flow_context_t *flow_data_ptr;
2219 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2221 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2225 /* find the flow data structure that was just used for creating new flow
2226 - its id should be default */
2227 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2228 if (flow_data_ptr == NULL)
/* rebind the temporary context to the caller-supplied id */
2232 flow_data_ptr->flow_id = flow_id;
2235 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
/*
 * sep_ioctl - main ioctl dispatcher for the SEP character device; routes each
 * SEP_IOC* command to its handler.
 * @inode: inode of the device node (unused in the visible code)
 * @filp:  open file; private_data holds the struct sep_device
 * @cmd:   SEP_IOC* command code
 * @arg:   per-command user-space argument pointer
 *
 * NOTE(review): the switch statement's 'break's, the 'default:' case, the
 * declaration of 'error' and the locking around the dispatch are elided in
 * this chunk. The old inode-taking ioctl signature implies this runs under
 * the Big Kernel Lock on the era's kernels.
 */
2243 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2246 struct sep_device *sep = filp->private_data;
2248 dbg("------------>SEP Driver: ioctl start\n");
2250 edbg("SEP Driver: cmd is %x\n", cmd);
2253 case SEP_IOCSENDSEPCOMMAND:
2254 /* send command to SEP */
/* NOTE(review): return value not captured, unlike the other handlers */
2255 sep_send_command_handler(sep);
2256 edbg("SEP Driver: after sep_send_command_handler\n");
2258 case SEP_IOCSENDSEPRPLYCOMMAND:
2259 /* send reply command to SEP */
2260 sep_send_reply_command_handler(sep);
2262 case SEP_IOCALLOCDATAPOLL:
2263 /* allocate data pool */
2264 error = sep_allocate_data_pool_memory_handler(sep, arg);
2266 case SEP_IOCWRITEDATAPOLL:
2267 /* write data into memory pool */
2268 error = sep_write_into_data_pool_handler(sep, arg);
2270 case SEP_IOCREADDATAPOLL:
2271 /* read data from data pool into application memory */
2272 error = sep_read_from_data_pool_handler(sep, arg);
2274 case SEP_IOCCREATESYMDMATABLE:
2275 /* create dma table for synhronic operation */
2276 error = sep_create_sync_dma_tables_handler(sep, arg);
2278 case SEP_IOCCREATEFLOWDMATABLE:
2279 /* create flow dma tables */
2280 error = sep_create_flow_dma_tables_handler(sep, arg);
2282 case SEP_IOCFREEDMATABLEDATA:
2283 /* free the pages */
2284 error = sep_free_dma_table_data_handler(sep);
2286 case SEP_IOCSETFLOWID:
2288 error = sep_set_flow_id_handler(sep, arg);
2290 case SEP_IOCADDFLOWTABLE:
2291 /* add tables to the dynamic flow */
2292 error = sep_add_flow_tables_handler(sep, arg);
2294 case SEP_IOCADDFLOWMESSAGE:
2295 /* add message of add tables to flow */
2296 error = sep_add_flow_tables_message_handler(sep, arg);
2298 case SEP_IOCSEPSTART:
2299 /* start command to sep */
2300 error = sep_start_handler(sep);
2302 case SEP_IOCSEPINIT:
2303 /* init command to sep */
2304 error = sep_init_handler(sep, arg);
2306 case SEP_IOCGETSTATICPOOLADDR:
2307 /* get the physical and virtual addresses of the static pool */
2308 error = sep_get_static_pool_addr_handler(sep, arg);
2310 case SEP_IOCENDTRANSACTION:
2311 error = sep_end_transaction_handler(sep, arg);
2313 case SEP_IOCREALLOCCACHERES:
2314 error = sep_realloc_cache_resident_handler(sep, arg);
2316 case SEP_IOCGETMAPPEDADDROFFSET:
2317 error = sep_get_physical_mapped_offset_handler(sep, arg);
/* presumably the SEP_IOCGETIME case label is elided just above this call */
2320 error = sep_get_time_handler(sep, arg);
2326 dbg("SEP Driver:<-------- ioctl end\n");
2332 #if !SEP_DRIVER_POLLING_MODE
2334 /* handler for flow done interrupt */
/*
 * sep_flow_done_handler - workqueue callback run when a flow completes:
 * frees the processed LLI tables and, if more input tables are queued,
 * signals the SEP to continue via GPR2.
 * @work: embedded work_struct inside the flow context
 */
2336 static void sep_flow_done_handler(struct work_struct *work)
2338 struct sep_flow_context_t *flow_data_ptr;
2340 /* obtain the mutex */
2341 mutex_lock(&sep_mutex);
2343 /* get the pointer to context */
/* NOTE(review): direct cast from work_struct* assumes flow_wq is the FIRST
   member of sep_flow_context_t; container_of() would be the safe idiom -
   confirm the struct layout */
2344 flow_data_ptr = (struct sep_flow_context_t *) work;
2346 /* free all the current input tables in sep */
2347 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2349 /* free all the current tables output tables in SEP (if needed) */
/* 0xffffffff is the "no table" sentinel for physical_address */
2350 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2351 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2353 /* check if we have additional tables to be sent to SEP only input
2354 flag may be checked */
2355 if (flow_data_ptr->input_tables_flag) {
2356 /* copy the message to the shared RAM and signal SEP */
/* NOTE(review): argument order copies FROM shared_area INTO message, but the
   comment says message -> shared RAM; the src/dst look reversed - verify.
   Also 'sep' here is not a parameter or visible local - presumably a global
   or an elided local; confirm */
2357 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_area, flow_data_ptr->message_size_in_bytes);
2359 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2361 mutex_unlock(&sep_mutex);
2364 interrupt handler function
/*
 * sep_inthandler - shared IRQ handler for the SEP device: reads IRR to
 * identify the interrupt source, wakes waiters on a reply interrupt
 * (bit 13), and acknowledges by writing the value back to ICR.
 * @irq:    interrupt line
 * @dev_id: struct sep_device registered with request_irq
 */
2366 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2368 irqreturn_t int_error;
2369 unsigned long reg_val;
2370 unsigned long flow_id;
2371 struct sep_flow_context_t *flow_context_ptr;
2372 struct sep_device *sep = dev_id;
2374 int_error = IRQ_HANDLED;
2376 /* read the IRR register to check if this is SEP interrupt */
2377 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2378 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2380 /* check if this is the flow interrupt */
/* flow-done path deliberately disabled: condition hard-coded to 0 */
2381 if (0 /*reg_val & (0x1 << 11) */ ) {
2382 /* read GPRO to find out the which flow is done */
/* NOTE(review): comment says GPR0 but the code re-reads IRR - one of the
   two is wrong (moot while the branch is disabled) */
2383 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2385 /* find the contex of the flow */
2386 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2387 if (flow_context_ptr == NULL)
2388 goto end_function_with_error;
2390 /* queue the work */
2391 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2392 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2395 /* check if this is reply interrupt from SEP */
2396 if (reg_val & (0x1 << 13)) {
2397 /* update the counter of reply messages */
/* (the counter increment itself is elided in this chunk) */
2399 /* wake up the waiting process */
2400 wake_up(&sep_event);
/* not our interrupt: tell the kernel so shared-IRQ accounting is correct */
2402 int_error = IRQ_NONE;
2406 end_function_with_error:
2407 /* clear the interrupt */
2408 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
/*
 * sep_wait_busy - poll the SEP busy register until the device is idle.
 * Only the initial read is visible; the loop/exit condition is elided.
 * NOTE(review): presumably an unbounded busy-wait - confirm a timeout
 * exists in the full source.
 */
2419 static void sep_wait_busy(struct sep_device *sep)
2424 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2429 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * sep_configure_dma_burst - hardware workaround: switch the SEP DMA engine
 * from multi-burst to single-burst mode. Handshakes register ownership with
 * the SEP via GPR0 (0x2 = request, 0x0 = release) around the AHB write.
 * @sep: device instance
 */
2431 static void sep_configure_dma_burst(struct sep_device *sep)
/* function-local register offset; kept here because it is only used by
   this workaround */
2433 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2435 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2437 /* request access to registers from SEP */
2438 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2)
2440 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
/* (the wait-for-grant polling loop is elided in this chunk) */
2444 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2446 /* set the DMA burst register to single burst */
2447 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2449 /* release the sep busy */
2450 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2453 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2460 Function that is activaed on the succesful probe of the SEP device
/*
 * sep_probe - PCI probe: enable the device, allocate and publish the shared
 * message area, create the flow workqueue, load the ROM code, map the I/O
 * registers, allocate the RAR region, and (in interrupt mode) hook the IRQ.
 * @pdev: the probed PCI device
 * @ent:  matching entry from sep_pci_id_tbl
 *
 * NOTE(review): declarations (error, counter, retval), several error checks
 * and the success-path return/goto are elided in this chunk, so the exact
 * unwind ordering cannot be verified here.
 */
2462 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2465 struct sep_device *sep;
2467 int size; /* size of memory for allocation */
2469 edbg("Sep pci probe starting\n");
/* single-instance driver: refuse a second probe */
2470 if (sep_dev != NULL) {
2471 dev_warn(&pdev->dev, "only one SEP supported.\n");
2475 /* enable the device */
2476 error = pci_enable_device(pdev);
2478 edbg("error enabling pci device\n");
2482 /* set the pci dev pointer */
2483 sep_dev = &sep_instance;
2484 sep = &sep_instance;
2486 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2487 /* transaction counter that coordinates the transactions between SEP
2490 /* counter for the messages from sep */
2492 /* counter for the number of bytes allocated in the pool
2493 for the current transaction */
2494 sep->data_pool_bytes_allocated = 0;
2496 /* calculate the total size for allocation */
/* shared region = message + sync-DMA tables + data pool + flow-DMA tables
   + static area + system data, all contiguous */
2497 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2498 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2500 /* allocate the shared area */
2501 if (sep_map_and_alloc_shared_area(sep, size)) {
2503 /* allocation failed */
2504 goto end_function_error;
2506 /* now set the memory regions */
2507 sep->message_shared_area_addr = sep->shared_area;
2509 edbg("SEP Driver: sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
2511 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2512 /* send the new SHARED MESSAGE AREA to the SEP */
2513 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2515 /* poll for SEP response */
/* NOTE(review): unbounded polling loop - hangs the probe if the SEP never
   answers; 0xffffffff is treated as "still pending" */
2516 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2517 while (retval != 0xffffffff && retval != sep->shared_area_bus)
2518 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2520 /* check the return value (register) */
2521 if (retval != sep->shared_area_bus) {
2523 goto end_function_deallocate_sep_shared_area;
2526 /* init the flow contextes */
2527 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2528 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2530 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2531 if (sep->flow_wq == NULL) {
2533 edbg("sep_driver:flow queue creation failed\n");
2534 goto end_function_deallocate_sep_shared_area;
2536 edbg("SEP Driver: create flow workqueue \n");
2537 /* load the rom code */
2538 sep_load_rom_code(sep);
/* take a reference on the PCI device for the driver's lifetime */
2540 sep->pdev = pci_dev_get(pdev);
2542 /* get the io memory start address */
2543 sep->io_bus = pci_resource_start(pdev, 0);
2545 edbg("SEP Driver error pci resource start\n");
2546 goto end_function_deallocate_sep_shared_area;
2549 /* get the io memory end address */
2550 sep->io_end_bus = pci_resource_end(pdev, 0);
2551 if (!sep->io_end_bus) {
2552 edbg("SEP Driver error pci resource end\n");
2553 goto end_function_deallocate_sep_shared_area;
2556 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2558 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2560 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep->io_end_bus);
2562 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
2564 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2565 if (!sep->io_addr) {
2566 edbg("SEP Driver error ioremap of io memory\n");
2567 goto end_function_deallocate_sep_shared_area;
2570 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2572 sep->reg_addr = (void __iomem *) sep->io_addr;
2574 /* set up system base address and shared memory location */
/* NOTE(review): kmalloc + __pa() for a device-visible buffer bypasses the
   DMA API (no dma_alloc_coherent / dma_map_single), so there is no IOMMU
   or cache-coherency handling - flagged, not changed here */
2576 sep->rar_addr = kmalloc(2 * SEP_RAR_IO_MEM_REGION_SIZE, GFP_KERNEL);
2578 if (!sep->rar_addr) {
2579 edbg("SEP Driver:cant kmalloc rar\n");
2580 goto end_function_uniomap;
2583 sep->rar_bus = __pa(sep->rar_addr);
2585 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2586 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2588 #if !SEP_DRIVER_POLLING_MODE
2590 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2592 /* clear ICR register */
2593 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2595 /* set the IMR register - open only GPR 2 */
/* unmask only bit 13 (the GPR2 reply interrupt); everything else stays masked */
2596 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2598 edbg("SEP Driver: about to call request_irq\n");
2599 /* get the interrupt line */
2600 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2602 goto end_function_free_res;
2605 edbg("SEP Driver: about to write IMR REG_ADDR");
2607 /* set the IMR register - open only GPR 2 */
2608 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* error unwind; the success-path return before these labels is elided */
2610 end_function_free_res:
2611 kfree(sep->rar_addr);
2612 #endif /* SEP_DRIVER_POLLING_MODE */
2613 end_function_uniomap:
2614 iounmap(sep->io_addr);
2615 end_function_deallocate_sep_shared_area:
2616 /* de-allocate shared area */
2617 sep_unmap_and_free_shared_area(sep, size);
/* PCI IDs this driver binds to: Intel SEP security processor (0x080c).
   (The terminating { } sentinel entry is elided in this chunk.) */
2624 static struct pci_device_id sep_pci_id_tbl[] = {
2625 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
/* export the table so module autoloading (modalias) can match the device */
2629 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2631 /* field for registering driver to PCI device */
/* PCI driver descriptor; the .probe hook assignment is elided in this chunk,
   and per the FIXME below there is intentionally no .remove handler yet */
2632 static struct pci_driver sep_pci_driver = {
2633 .name = "sep_sec_driver",
2634 .id_table = sep_pci_id_tbl,
2636 /* FIXME: remove handler */
2639 /* major and minor device numbers */
/* dev_t allocated dynamically by alloc_chrdev_region() in
   sep_register_driver_to_fs() */
2640 static dev_t sep_devno;
2642 /* the files operations structure of the driver */
/* char-device entry points; the .ioctl/.poll/.open hook assignments are
   elided in this chunk */
2643 static struct file_operations sep_file_operations = {
2644 .owner = THIS_MODULE,
2648 .release = sep_release,
2653 /* cdev struct of the driver */
2654 static struct cdev sep_cdev;
2657 this function registers the driver to the file system
/*
 * sep_register_driver_to_fs - allocate a char-device number and register
 * the cdev so /dev access reaches sep_file_operations.
 * Returns 0 on success; on cdev_add failure the devnum is released.
 * (The success-path return is elided in this chunk.)
 */
2659 static int sep_register_driver_to_fs(void)
/* dynamically allocate one major/minor pair under the name "sep_sec_driver" */
2661 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2663 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2668 cdev_init(&sep_cdev, &sep_file_operations);
2669 sep_cdev.owner = THIS_MODULE;
2671 /* register the driver with the kernel */
/* after cdev_add succeeds the device is live and can receive opens */
2672 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2675 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2676 goto end_function_unregister_devnum;
2681 end_function_unregister_devnum:
2683 /* unregister dev numbers */
2684 unregister_chrdev_region(sep_devno, 1);
2691 /*--------------------------------------------------------------
2693 ----------------------------------------------------------------*/
/*
 * sep_init - module entry point: register the PCI driver, then register
 * the char-device interface.
 *
 * NOTE(review): on pci_register_driver() failure this jumps to
 * end_function_unregister_from_fs, which calls cdev_del() and
 * unregister_chrdev_region() on a cdev/devno that were never set up -
 * the labels appear to be in the wrong order. The success-path goto that
 * skips the unwind labels is elided in this chunk; confirm against the
 * full source.
 */
2694 static int __init sep_init(void)
2697 dbg("SEP Driver:-------->Init start\n");
2698 /* FIXME: Probe can occur before we are ready to survive a probe */
2699 ret_val = pci_register_driver(&sep_pci_driver);
2701 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2702 goto end_function_unregister_from_fs;
2704 /* register driver to fs */
2705 ret_val = sep_register_driver_to_fs();
2707 goto end_function_unregister_pci;
2709 end_function_unregister_pci:
2710 pci_unregister_driver(&sep_pci_driver);
2711 end_function_unregister_from_fs:
2712 /* unregister from fs */
2713 cdev_del(&sep_cdev);
2714 /* unregister dev numbers */
2715 unregister_chrdev_region(sep_devno, 1);
2717 dbg("SEP Driver:<-------- Init end\n");
2722 /*-------------------------------------------------------------
2724 --------------------------------------------------------------*/
/*
 * sep_exit - module exit: tear down the char device and free the shared
 * area and register mapping of the single device instance.
 *
 * NOTE(review): no pci_unregister_driver() call is visible here (possibly
 * elided), and per the FIXME the shared-area/iounmap teardown belongs in a
 * device remove handler, not module exit. The declaration of 'size' and any
 * NULL check on sep_dev are also elided in this chunk.
 */
2725 static void __exit sep_exit(void)
2729 dbg("SEP Driver:--------> Exit start\n");
2731 /* unregister from fs */
2732 cdev_del(&sep_cdev);
2733 /* unregister dev numbers */
2734 unregister_chrdev_region(sep_devno, 1);
2735 /* calculate the total size for de-allocation */
/* must mirror the size computation in sep_probe() exactly */
2736 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2737 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2738 /* FIXME: We need to do this in the unload for the device */
2739 /* free shared area */
2741 sep_unmap_and_free_shared_area(sep_dev, size);
2742 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2743 iounmap((void *) sep_dev->reg_addr);
2744 edbg("SEP Driver: iounmap \n");
2746 edbg("SEP Driver: release_mem_region \n");
2747 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit hookup and license declaration (GPL required for the
   kernel symbols used above) */
2751 module_init(sep_init);
2752 module_exit(sep_exit);
2754 MODULE_LICENSE("GPL");