[ARM] Feroceon: add highmem support to L2 cache handling code
[safe/jmp/linux-2.6] / mm / vmalloc.c
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 903cad4..520a759 100644 (file)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
         unsigned long addr;
         int purged = 0;
 
+        BUG_ON(!size);
         BUG_ON(size & ~PAGE_MASK);
 
         va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
         addr = ALIGN(vstart, align);
 
         spin_lock(&vmap_area_lock);
+        if (addr + size - 1 < addr)
+                goto overflow;
+
         /* XXX: could have a last_hole cache */
         n = vmap_area_root.rb_node;
         if (n) {
@@ -365,6 +369,8 @@ retry:
 
                 while (addr + size > first->va_start && addr + size <= vend) {
                         addr = ALIGN(first->va_end + PAGE_SIZE, align);
+                        if (addr + size - 1 < addr)
+                                goto overflow;
 
                         n = rb_next(&first->rb_node);
                         if (n)
@@ -375,6 +381,7 @@ retry:
         }
 found:
         if (addr + size > vend) {
+overflow:
                 spin_unlock(&vmap_area_lock);
                 if (!purged) {
                         purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
         static DEFINE_SPINLOCK(purge_lock);
         LIST_HEAD(valist);
         struct vmap_area *va;
+        struct vmap_area *n_va;
         int nr = 0;
 
         /*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
         if (nr) {
                 spin_lock(&vmap_area_lock);
-                list_for_each_entry(va, &valist, purge_list)
+                list_for_each_entry_safe(va, n_va, &valist, purge_list)
                         __free_vmap_area(va);
                 spin_unlock(&vmap_area_lock);
         }
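
For context, the two "addr + size - 1 < addr" hunks guard against the requested range's end address wrapping past the top of the unsigned address space, and the new BUG_ON(!size) rejects the zero-size case in which that same expression would underflow. Below is a minimal userspace sketch of the wraparound test, not kernel code; range_wraps() is a hypothetical helper used only for illustration.

#include <limits.h>
#include <stdio.h>

/* Hypothetical helper, illustration only: the same unsigned wraparound
 * test that the hunks above add before and inside the search loop in
 * alloc_vmap_area(). */
static int range_wraps(unsigned long addr, unsigned long size)
{
        return addr + size - 1 < addr;
}

int main(void)
{
        /* A request that fits: the end address does not wrap. */
        printf("%d\n", range_wraps(0x1000UL, 0x2000UL));             /* prints 0 */
        /* A request whose end would pass ULONG_MAX: the sum wraps and
         * becomes smaller than addr, so the check fires. */
        printf("%d\n", range_wraps(ULONG_MAX - 0xfffUL, 0x2000UL));  /* prints 1 */
        return 0;
}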
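The last hunk switches __purge_vmap_area_lazy() to list_for_each_entry_safe() because __free_vmap_area() unlinks and frees the entry the loop is standing on; the safe variant stashes the successor in n_va before the body runs, so the iterator never reads freed memory. The following is a rough userspace analogue of that pattern; the node type and the list_for_each_safe macro here are stand-ins written for this sketch, not the kernel's list.h.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/* Stand-in for the kernel's safe iterator: remember the successor in tmp
 * before the loop body runs, so the body may free pos without the next
 * step dereferencing freed memory. A plain iterator that reads pos->next
 * after the body would be a use-after-free here. */
#define list_for_each_safe(pos, tmp, head) \
        for ((pos) = (head); (pos) && ((tmp) = (pos)->next, 1); (pos) = (tmp))

int main(void)
{
        struct node *head = NULL, *pos, *tmp;

        /* Build a small list: 2 -> 1 -> 0. */
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }

        /* Free every entry while iterating, as the purge loop does. */
        list_for_each_safe(pos, tmp, head) {
                printf("freeing %d\n", pos->val);
                free(pos);
        }
        return 0;
}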