lib/iommu-helper.c
/*
 * IOMMU helper functions for free area management
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/log2.h>		/* is_power_of_2() */

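/*
 * Scan @map for a run of @nr zero bits that starts no lower than @start
 * and lies entirely below @size.  The candidate index is rounded up to a
 * multiple of @align_mask + 1 (so @align_mask must be a power of two minus
 * one).  Returns the index of the first bit of the run, or -1 (as an
 * unsigned long) if no such run exists.
 */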
static unsigned long find_next_zero_area(unsigned long *map,
                                         unsigned long size,
                                         unsigned long start,
                                         unsigned int nr,
                                         unsigned long align_mask)
{
        unsigned long index, end, i;
again:
        index = find_next_zero_bit(map, size, start);

        /* Align allocation */
        index = (index + align_mask) & ~align_mask;

        end = index + nr;
        if (end >= size)
                return -1;
        for (i = index; i < end; i++) {
                if (test_bit(i, map)) {
                        start = i + 1;
                        goto again;
                }
        }
        return index;
}

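/*
 * Mark @len bits of @map as allocated, starting at bit @i.
 */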
void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
        unsigned long end = i + len;

        while (i < end) {
                __set_bit(i, map);
                i++;
        }
}

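/*
 * Return non-zero if a mapping of @nr IO pages starting at @index would
 * cross a @boundary_size boundary.  @shift is added to @index before the
 * check (callers typically pass the IO page number of the start of the
 * aperture); @boundary_size must be a power of two.
 */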
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
                           unsigned long shift,
                           unsigned long boundary_size)
{
        BUG_ON(!is_power_of_2(boundary_size));

        shift = (shift + index) & (boundary_size - 1);
        return shift + nr > boundary_size;
}

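/*
 * Allocate a run of @nr free bits from @map (of @size bits), searching
 * from @start, aligned to @align_mask + 1 and not crossing a
 * @boundary_size boundary.  On success the bits are marked allocated and
 * the index of the first bit is returned; on failure -1 is returned.
 */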
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
                               unsigned long start, unsigned int nr,
                               unsigned long shift, unsigned long boundary_size,
                               unsigned long align_mask)
{
        unsigned long index;
again:
        index = find_next_zero_area(map, size, start, nr, align_mask);
        if (index != -1) {
                if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
                        /* we could do this more efficiently */
                        start = index + 1;
                        goto again;
                }
                iommu_area_reserve(map, index, nr);
        }
        return index;
}
EXPORT_SYMBOL(iommu_area_alloc);

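/*
 * Release a previously allocated run of @nr bits starting at @start.
 */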
void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
        unsigned long end = start + nr;

        while (start < end) {
                __clear_bit(start, map);
                start++;
        }
}
EXPORT_SYMBOL(iommu_area_free);

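/*
 * Return the number of IO pages of size @io_page_size needed to map @len
 * bytes starting at address @addr, including the partial page implied by
 * the offset of @addr within its first page.
 */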
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
                              unsigned long io_page_size)
{
        unsigned long size = (addr & (io_page_size - 1)) + len;

        return DIV_ROUND_UP(size, io_page_size);
}
EXPORT_SYMBOL(iommu_num_pages);
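
/*
 * Illustrative usage sketch (not part of this file): an IOMMU driver
 * typically keeps a bitmap per translation table and wraps the helpers
 * above roughly as follows.  "tbl", its fields, IO_PAGE_SIZE,
 * IO_PAGE_SHIFT and boundary_size are hypothetical placeholders, not
 * symbols defined here.
 *
 *	unsigned long npages = iommu_num_pages(paddr, size, IO_PAGE_SIZE);
 *	unsigned long entry = iommu_area_alloc(tbl->map, tbl->nr_entries,
 *					       tbl->hint, npages,
 *					       tbl->base >> IO_PAGE_SHIFT,
 *					       boundary_size, 0);
 *	if (entry != -1) {
 *		... set up the IO page table entries ...
 *		tbl->hint = entry + npages;
 *	}
 *	...
 *	iommu_area_free(tbl->map, entry, npages);
 */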