/*
 * Blackfin arch: fix unused warning for some blackfin derivatives
 * (source: arch/blackfin/mm/sram-alloc.c)
 */
1 /*
2  * File:         arch/blackfin/mm/sram-alloc.c
3  * Based on:
4  * Author:
5  *
6  * Created:
7  * Description:  SRAM allocator for Blackfin L1 and L2 memory
8  *
9  * Modified:
10  *               Copyright 2004-2008 Analog Devices Inc.
11  *
12  * Bugs:         Enter bugs at http://blackfin.uclinux.org/
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2 of the License, or
17  * (at your option) any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, see the file COPYING, or write
26  * to the Free Software Foundation, Inc.,
27  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/types.h>
33 #include <linux/miscdevice.h>
34 #include <linux/ioport.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/poll.h>
38 #include <linux/proc_fs.h>
39 #include <linux/spinlock.h>
40 #include <linux/rtc.h>
41 #include <asm/blackfin.h>
42 #include "blackfin_sram.h"
43
/* Per-CPU locks guarding each L1 region's free/used lists; L2 SRAM is
 * shared between cores, so it gets a single global lock. */
static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;			/* start address of this piece */
	int size;			/* size in bytes (allocator rounds up to 4) */
	pid_t pid;			/* owner task's pid; 0 when the piece is free */
	struct sram_piece *next;	/* next piece in the (sorted) list */
};

/* Each region is tracked by two singly-linked lists hanging off dummy
 * head nodes: one list of free pieces, one of allocated pieces. */
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

/* slab cache for the sram_piece bookkeeping nodes themselves */
static struct kmem_cache *sram_piece_cache;
80
81 /* L1 Scratchpad SRAM initialization function */
82 static void __init l1sram_init(void)
83 {
84         unsigned int cpu;
85         for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
86                 per_cpu(free_l1_ssram_head, cpu).next =
87                         kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
88                 if (!per_cpu(free_l1_ssram_head, cpu).next) {
89                         printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
90                         return;
91                 }
92
93                 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu);
94                 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH;
95                 per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
96                 per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
97
98                 per_cpu(used_l1_ssram_head, cpu).next = NULL;
99
100                 /* mutex initialize */
101                 spin_lock_init(&per_cpu(l1sram_lock, cpu));
102                 printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
103                         L1_SCRATCH_LENGTH >> 10);
104         }
105 }
106
107 static void __init l1_data_sram_init(void)
108 {
109 #if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
110         unsigned int cpu;
111 #endif
112 #if L1_DATA_A_LENGTH != 0
113         for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
114                 per_cpu(free_l1_data_A_sram_head, cpu).next =
115                         kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
116                 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
117                         printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
118                         return;
119                 }
120
121                 per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
122                         (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
123                 per_cpu(free_l1_data_A_sram_head, cpu).next->size =
124                         L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
125                 per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
126                 per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
127
128                 per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
129
130                 printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
131                         L1_DATA_A_LENGTH >> 10,
132                         per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
133         }
134 #endif
135 #if L1_DATA_B_LENGTH != 0
136         for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
137                 per_cpu(free_l1_data_B_sram_head, cpu).next =
138                         kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
139                 if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
140                         printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
141                         return;
142                 }
143
144                 per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
145                         (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
146                 per_cpu(free_l1_data_B_sram_head, cpu).next->size =
147                         L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
148                 per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
149                 per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
150
151                 per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
152
153                 printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
154                         L1_DATA_B_LENGTH >> 10,
155                         per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
156                 /* mutex initialize */
157         }
158 #endif
159
160 #if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
161         for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
162                 spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
163 #endif
164 }
165
166 static void __init l1_inst_sram_init(void)
167 {
168 #if L1_CODE_LENGTH != 0
169         unsigned int cpu;
170         for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
171                 per_cpu(free_l1_inst_sram_head, cpu).next =
172                         kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
173                 if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
174                         printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
175                         return;
176                 }
177
178                 per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
179                         (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
180                 per_cpu(free_l1_inst_sram_head, cpu).next->size =
181                         L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
182                 per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
183                 per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
184
185                 per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
186
187                 printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
188                         L1_CODE_LENGTH >> 10,
189                         per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
190
191                 /* mutex initialize */
192                 spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
193         }
194 #endif
195 }
196
197 static void __init l2_sram_init(void)
198 {
199 #if L2_LENGTH != 0
200         free_l2_sram_head.next =
201                 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
202         if (!free_l2_sram_head.next) {
203                 printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
204                 return;
205         }
206
207         free_l2_sram_head.next->paddr =
208                 (void *)L2_START + (_ebss_l2 - _stext_l2);
209         free_l2_sram_head.next->size =
210                 L2_LENGTH - (_ebss_l2 - _stext_l2);
211         free_l2_sram_head.next->pid = 0;
212         free_l2_sram_head.next->next = NULL;
213
214         used_l2_sram_head.next = NULL;
215
216         printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
217                 L2_LENGTH >> 10,
218                 free_l2_sram_head.next->size >> 10);
219 #endif
220
221         /* mutex initialize */
222         spin_lock_init(&l2_sram_lock);
223 }
224
/* Boot-time entry point: create the bookkeeping-node cache and carve up
 * every on-chip SRAM region present on this derivative. */
void __init bfin_sram_init(void)
{
	/* SLAB_PANIC: the system cannot run without this cache, so panic
	 * rather than returning NULL on failure */
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();
}
236
237 /* SRAM allocate function */
238 static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
239                 struct sram_piece *pused_head)
240 {
241         struct sram_piece *pslot, *plast, *pavail;
242
243         if (size <= 0 || !pfree_head || !pused_head)
244                 return NULL;
245
246         /* Align the size */
247         size = (size + 3) & ~3;
248
249         pslot = pfree_head->next;
250         plast = pfree_head;
251
252         /* search an available piece slot */
253         while (pslot != NULL && size > pslot->size) {
254                 plast = pslot;
255                 pslot = pslot->next;
256         }
257
258         if (!pslot)
259                 return NULL;
260
261         if (pslot->size == size) {
262                 plast->next = pslot->next;
263                 pavail = pslot;
264         } else {
265                 pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
266
267                 if (!pavail)
268                         return NULL;
269
270                 pavail->paddr = pslot->paddr;
271                 pavail->size = size;
272                 pslot->paddr += size;
273                 pslot->size -= size;
274         }
275
276         pavail->pid = current->pid;
277
278         pslot = pused_head->next;
279         plast = pused_head;
280
281         /* insert new piece into used piece list !!! */
282         while (pslot != NULL && pavail->paddr < pslot->paddr) {
283                 plast = pslot;
284                 pslot = pslot->next;
285         }
286
287         pavail->next = pslot;
288         plast->next = pavail;
289
290         return pavail->paddr;
291 }
292
293 /* Allocate the largest available block.  */
294 static void *_sram_alloc_max(struct sram_piece *pfree_head,
295                                 struct sram_piece *pused_head,
296                                 unsigned long *psize)
297 {
298         struct sram_piece *pslot, *pmax;
299
300         if (!pfree_head || !pused_head)
301                 return NULL;
302
303         pmax = pslot = pfree_head->next;
304
305         /* search an available piece slot */
306         while (pslot != NULL) {
307                 if (pslot->size > pmax->size)
308                         pmax = pslot;
309                 pslot = pslot->next;
310         }
311
312         if (!pmax)
313                 return NULL;
314
315         *psize = pmax->size;
316
317         return _sram_alloc(*psize, pfree_head, pused_head);
318 }
319
/* SRAM free function */
/*
 * Return the piece that starts exactly at @addr to the free list,
 * coalescing it with adjacent free pieces where possible.  The caller
 * must hold the region's lock.
 *
 * Returns 0 on success, -1 when @addr is not a current allocation.
 */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the relevant memory slot */
	pslot = pused_head->next;
	plast = pused_head;

	/* only an exact start-address match counts as "the allocation" */
	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	/* unlink from the used list and mark unowned */
	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert free pieces back to the free list (ascending by address) */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* merge with the preceding free piece when physically contiguous */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* merge with the following free piece when physically contiguous */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
373
/*
 * Free an SRAM allocation by dispatching, from the address range it
 * falls in, to the owning region's free routine.  Returns 0 on
 * success, -1 when @addr lies in none of the configured regions.
 *
 * NOTE(review): each #if'd branch ends in a bare "else" so the chain
 * stays one statement regardless of which regions are compiled in.
 */
int sram_free(const void *addr)
{

#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);
404
405 void *l1_data_A_sram_alloc(size_t size)
406 {
407         unsigned long flags;
408         void *addr = NULL;
409         unsigned int cpu;
410
411         cpu = get_cpu();
412         /* add mutex operation */
413         spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
414
415 #if L1_DATA_A_LENGTH != 0
416         addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
417                         &per_cpu(used_l1_data_A_sram_head, cpu));
418 #endif
419
420         /* add mutex operation */
421         spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
422         put_cpu();
423
424         pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
425                  (long unsigned int)addr, size);
426
427         return addr;
428 }
429 EXPORT_SYMBOL(l1_data_A_sram_alloc);
430
/*
 * Free an allocation made from this CPU's L1 Data A bank.  Returns 0
 * on success, -1 when the bank does not exist or @addr is unknown.
 *
 * Compile the whole body out when L1_DATA_A_LENGTH == 0, matching
 * l1_data_B_sram_free(): the old version still did get_cpu() and took
 * the per-CPU lock just to return -1 for a bank that isn't there.
 */
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
455
456 void *l1_data_B_sram_alloc(size_t size)
457 {
458 #if L1_DATA_B_LENGTH != 0
459         unsigned long flags;
460         void *addr;
461         unsigned int cpu;
462
463         cpu = get_cpu();
464         /* add mutex operation */
465         spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
466
467         addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
468                         &per_cpu(used_l1_data_B_sram_head, cpu));
469
470         /* add mutex operation */
471         spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
472         put_cpu();
473
474         pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
475                  (long unsigned int)addr, size);
476
477         return addr;
478 #else
479         return NULL;
480 #endif
481 }
482 EXPORT_SYMBOL(l1_data_B_sram_alloc);
483
/*
 * Free an allocation made from this CPU's L1 Data B bank.  Returns 0
 * on success, -1 when the bank does not exist or @addr is unknown.
 */
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned int cpu = get_cpu();
	unsigned long irqflags;
	int rc;

	/* serialize against other L1-data users on this CPU */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), irqflags);
	rc = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), irqflags);
	put_cpu();

	return rc;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
508
509 void *l1_data_sram_alloc(size_t size)
510 {
511         void *addr = l1_data_A_sram_alloc(size);
512
513         if (!addr)
514                 addr = l1_data_B_sram_alloc(size);
515
516         return addr;
517 }
518 EXPORT_SYMBOL(l1_data_sram_alloc);
519
520 void *l1_data_sram_zalloc(size_t size)
521 {
522         void *addr = l1_data_sram_alloc(size);
523
524         if (addr)
525                 memset(addr, 0x00, size);
526
527         return addr;
528 }
529 EXPORT_SYMBOL(l1_data_sram_zalloc);
530
/* Free L1 data SRAM: try bank A first, then bank B. */
int l1_data_sram_free(const void *addr)
{
	if (l1_data_A_sram_free(addr) == 0)
		return 0;
	return l1_data_B_sram_free(addr);
}
EXPORT_SYMBOL(l1_data_sram_free);
540
541 void *l1_inst_sram_alloc(size_t size)
542 {
543 #if L1_CODE_LENGTH != 0
544         unsigned long flags;
545         void *addr;
546         unsigned int cpu;
547
548         cpu = get_cpu();
549         /* add mutex operation */
550         spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
551
552         addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
553                         &per_cpu(used_l1_inst_sram_head, cpu));
554
555         /* add mutex operation */
556         spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
557         put_cpu();
558
559         pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
560                  (long unsigned int)addr, size);
561
562         return addr;
563 #else
564         return NULL;
565 #endif
566 }
567 EXPORT_SYMBOL(l1_inst_sram_alloc);
568
/*
 * Free an allocation made from this CPU's L1 instruction SRAM.
 * Returns 0 on success, -1 when the region does not exist or @addr is
 * unknown.
 */
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu = get_cpu();
	unsigned long irqflags;
	int rc;

	/* serialize against other L1-instruction users on this CPU */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), irqflags);
	rc = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), irqflags);
	put_cpu();

	return rc;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
593
594 /* L1 Scratchpad memory allocate function */
595 void *l1sram_alloc(size_t size)
596 {
597         unsigned long flags;
598         void *addr;
599         unsigned int cpu;
600
601         cpu = get_cpu();
602         /* add mutex operation */
603         spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
604
605         addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
606                         &per_cpu(used_l1_ssram_head, cpu));
607
608         /* add mutex operation */
609         spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
610         put_cpu();
611
612         return addr;
613 }
614
615 /* L1 Scratchpad memory allocate function */
616 void *l1sram_alloc_max(size_t *psize)
617 {
618         unsigned long flags;
619         void *addr;
620         unsigned int cpu;
621
622         cpu = get_cpu();
623         /* add mutex operation */
624         spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
625
626         addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
627                         &per_cpu(used_l1_ssram_head, cpu), psize);
628
629         /* add mutex operation */
630         spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
631         put_cpu();
632
633         return addr;
634 }
635
636 /* L1 Scratchpad memory free function */
637 int l1sram_free(const void *addr)
638 {
639         unsigned long flags;
640         int ret;
641         unsigned int cpu;
642
643         cpu = get_cpu();
644         /* add mutex operation */
645         spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
646
647         ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
648                         &per_cpu(used_l1_ssram_head, cpu));
649
650         /* add mutex operation */
651         spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
652         put_cpu();
653
654         return ret;
655 }
656
657 void *l2_sram_alloc(size_t size)
658 {
659 #if L2_LENGTH != 0
660         unsigned long flags;
661         void *addr;
662
663         /* add mutex operation */
664         spin_lock_irqsave(&l2_sram_lock, flags);
665
666         addr = _sram_alloc(size, &free_l2_sram_head,
667                         &used_l2_sram_head);
668
669         /* add mutex operation */
670         spin_unlock_irqrestore(&l2_sram_lock, flags);
671
672         pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
673                  (long unsigned int)addr, size);
674
675         return addr;
676 #else
677         return NULL;
678 #endif
679 }
680 EXPORT_SYMBOL(l2_sram_alloc);
681
682 void *l2_sram_zalloc(size_t size)
683 {
684         void *addr = l2_sram_alloc(size);
685
686         if (addr)
687                 memset(addr, 0x00, size);
688
689         return addr;
690 }
691 EXPORT_SYMBOL(l2_sram_zalloc);
692
/*
 * Free an allocation made from the core-shared L2 SRAM.  Returns 0 on
 * success, -1 when no L2 SRAM exists or @addr is unknown.
 */
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long irqflags;
	int rc;

	/* L2 is shared between cores, so one global lock guards it */
	spin_lock_irqsave(&l2_sram_lock, irqflags);
	rc = _sram_free(addr, &free_l2_sram_head, &used_l2_sram_head);
	spin_unlock_irqrestore(&l2_sram_lock, irqflags);

	return rc;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
714
/*
 * Free an allocation made with sram_alloc_with_lsl(): unlink its
 * tracking node from current->mm's sram_list and release the memory.
 * Returns 0 on success, -1 when @addr is not on the list.
 *
 * NOTE(review): sram_free()'s return value is ignored here — the list
 * node is removed regardless; confirm this is intended.
 */
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	/* walk via pointer-to-pointer so unlinking needs no "prev" node */
	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
733
734 void *sram_alloc_with_lsl(size_t size, unsigned long flags)
735 {
736         void *addr = NULL;
737         struct sram_list_struct *lsl = NULL;
738         struct mm_struct *mm = current->mm;
739
740         lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
741         if (!lsl)
742                 return NULL;
743
744         if (flags & L1_INST_SRAM)
745                 addr = l1_inst_sram_alloc(size);
746
747         if (addr == NULL && (flags & L1_DATA_A_SRAM))
748                 addr = l1_data_A_sram_alloc(size);
749
750         if (addr == NULL && (flags & L1_DATA_B_SRAM))
751                 addr = l1_data_B_sram_alloc(size);
752
753         if (addr == NULL && (flags & L2_SRAM))
754                 addr = l2_sram_alloc(size);
755
756         if (addr == NULL) {
757                 kfree(lsl);
758                 return NULL;
759         }
760         lsl->addr = addr;
761         lsl->length = size;
762         lsl->next = mm->context.sram_list;
763         mm->context.sram_list = lsl;
764         return addr;
765 }
766 EXPORT_SYMBOL(sram_alloc_with_lsl);
767
768 #ifdef CONFIG_PROC_FS
769 /* Once we get a real allocator, we'll throw all of this away.
770  * Until then, we need some sort of visibility into the L1 alloc.
771  */
772 /* Need to keep line of output the same.  Currently, that is 44 bytes
773  * (including newline).
774  */
/*
 * Append a formatted dump of one region's used and free lists to @buf,
 * advancing *@len past the bytes written.  Returns 0 on success, -1
 * when the region's list heads are missing (region never initialized).
 *
 * NOTE(review): @count is accepted but never checked against *@len, so
 * a region with very many pieces could overrun the proc page; kept
 * as-is pending a seq_file conversion.
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* search the relevant memory slot */
	pslot = pused_head->next;

	/* dump the allocated pieces first */
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	/* then the free pieces */
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
/*
 * read_proc handler for /proc/sram: dump every configured SRAM region
 * for every possible CPU, then the shared L2 region.  *eof is set only
 * when every dump succeeded; on failure the partial output is returned.
 */
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	/* L2 is core-shared, so it is dumped once, outside the CPU loop */
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}
847
/*
 * Create the read-only /proc/sram entry at boot.  Returns 0 on
 * success, -1 when the proc entry cannot be created.
 * Uses the legacy create_proc_entry()/read_proc interface.
 */
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;
	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->owner = THIS_MODULE;
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
861 #endif