diff --git a/mm/percpu.c b/mm/percpu.c
index 008fbd9..768419d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)                                       \
-       (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
-                + (unsigned long)__per_cpu_start)
+       (void __percpu *)((unsigned long)(addr) -                       \
+                         (unsigned long)pcpu_base_addr +               \
+                         (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)                                                \
-       (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
-                - (unsigned long)__per_cpu_start)
+       (void __force *)((unsigned long)(ptr) +                         \
+                        (unsigned long)pcpu_base_addr -                \
+                        (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
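
The two translation macros above only rebase an address between the unit-0 chunk mapping (pcpu_base_addr) and the static per-cpu symbol range (__per_cpu_start); the new __percpu and __force casts exist so sparse sees the result in the correct address space, while the arithmetic itself is unchanged. A minimal userspace sketch of the same rebasing, with illustrative names standing in for pcpu_base_addr and __per_cpu_start:

#include <stdio.h>

static char unit0[64];                     /* stands in for the unit-0 chunk    */
static char *base  = unit0;                /* plays the role of pcpu_base_addr  */
static char *start = (char *)0x100000;     /* plays the role of __per_cpu_start */

static void *addr_to_pcpu_ptr(void *addr)  /* kernel address -> percpu cookie */
{
	return (void *)((unsigned long)addr - (unsigned long)base
			+ (unsigned long)start);
}

static void *pcpu_ptr_to_addr(void *ptr)   /* percpu cookie -> kernel address */
{
	return (void *)((unsigned long)ptr + (unsigned long)base
			- (unsigned long)start);
}

int main(void)
{
	void *area   = &unit0[16];               /* area inside the chunk   */
	void *cookie = addr_to_pcpu_ptr(area);   /* what callers are handed */

	printf("round trip ok: %d\n", pcpu_ptr_to_addr(cookie) == area);
	return 0;
}
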
@@ -915,11 +915,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
        int rs, re;
 
        /* quick path, check whether it's empty already */
-       pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-               if (rs == page_start && re == page_end)
-                       return;
-               break;
-       }
+       rs = page_start;
+       pcpu_next_unpop(chunk, &rs, &re, page_end);
+       if (rs == page_start && re == page_end)
+               return;
 
        /* immutable chunks can't be depopulated */
        WARN_ON(chunk->immutable);
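
The rewritten quick path is equivalent to a single iteration of the old pcpu_for_each_unpop_region() loop: ask for the first unpopulated region starting at page_start and bail out if it already covers [page_start, page_end). pcpu_populate_chunk() below gets the mirrored treatment with pcpu_next_pop(). A userspace sketch of the check, assuming a simple bool-per-page map in place of the chunk->populated bitmap that the real pcpu_next_unpop() scans:

#include <stdbool.h>
#include <stdio.h>

/* Find the next unpopulated run in [*rs, end): skip populated pages,
 * then extend over the hole.  Mirrors pcpu_next_unpop()'s contract. */
static void next_unpop(const bool *populated, int *rs, int *re, int end)
{
	while (*rs < end && populated[*rs])
		(*rs)++;
	*re = *rs;
	while (*re < end && !populated[*re])
		(*re)++;
}

int main(void)
{
	bool populated[8] = { false };   /* nothing populated yet */
	int page_start = 2, page_end = 6;
	int rs = page_start, re;

	next_unpop(populated, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		puts("range already unpopulated, nothing to do");
	return 0;
}
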
@@ -970,11 +969,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
        int rs, re, rc;
 
        /* quick path, check whether all pages are already there */
-       pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
-               if (rs == page_start && re == page_end)
-                       goto clear;
-               break;
-       }
+       rs = page_start;
+       pcpu_next_pop(chunk, &rs, &re, page_end);
+       if (rs == page_start && re == page_end)
+               goto clear;
 
        /* need to allocate and map pages, this chunk can't be immutable */
        WARN_ON(chunk->immutable);
@@ -1069,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
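
With the __percpu return type, sparse can flag callers that dereference the returned cookie directly instead of going through per_cpu_ptr()/this_cpu_*() accessors. Under a normal build the annotation compiles away; under sparse (__CHECKER__) it is, roughly, an address-space attribute along these lines (a sketch of the era's include/linux/compiler.h, not part of this diff):

#ifdef __CHECKER__
# define __percpu	__attribute__((noderef, address_space(3)))
# define __force	__attribute__((force))
#else
# define __percpu
# define __force
#endif
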
@@ -1198,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, false);
 }
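
Typical callers never invoke __alloc_percpu() directly; they go through the alloc_percpu() type wrapper and access each CPU's copy with per_cpu_ptr(). A sketch of such a caller (the struct and function names are illustrative, not part of this diff):

#include <linux/module.h>
#include <linux/percpu.h>

struct hit_counter {
	unsigned long hits;
};

static struct hit_counter __percpu *counters;

static int __init counters_init(void)
{
	int cpu;

	counters = alloc_percpu(struct hit_counter);  /* wraps __alloc_percpu() */
	if (!counters)
		return -ENOMEM;

	for_each_possible_cpu(cpu)                    /* per-CPU view via per_cpu_ptr() */
		per_cpu_ptr(counters, cpu)->hits = 0;
	return 0;
}

static void __exit counters_exit(void)
{
	free_percpu(counters);                        /* NULL-safe, see below */
}

module_init(counters_init);
module_exit(counters_exit);
MODULE_LICENSE("GPL");
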
@@ -1219,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, true);
 }
@@ -1271,9 +1269,9 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
-       void *addr = __pcpu_ptr_to_addr(ptr);
+       void *addr;
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int off;
@@ -1281,6 +1279,8 @@ void free_percpu(void *ptr)
        if (!ptr)
                return;
 
+       addr = __pcpu_ptr_to_addr(ptr);
+
        spin_lock_irqsave(&pcpu_lock, flags);
 
        chunk = pcpu_chunk_addr_search(addr);