percpu: add __percpu sparse annotations and implement is_kernel_percpu_address()
diff --git a/mm/percpu.c b/mm/percpu.c
index b336638..6e09741 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)                                       \
-       (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
-                + (unsigned long)__per_cpu_start)
+       (void __percpu *)((unsigned long)(addr) -                       \
+                         (unsigned long)pcpu_base_addr +               \
+                         (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)                                                \
-       (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
-                - (unsigned long)__per_cpu_start)
+       (void __force *)((unsigned long)(ptr) +                         \
+                        (unsigned long)pcpu_base_addr -                \
+                        (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
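
The two macros above are plain linear rebasing: a percpu pointer is a chunk
address re-expressed relative to __per_cpu_start, and vice versa.  The new
__percpu and __force casts change only what sparse sees; the generated code
is identical.  A minimal sketch of the round trip, using made-up example
addresses (not real kernel values):

        /* illustrative only: __addr_to_pcpu_ptr and __pcpu_ptr_to_addr invert */
        unsigned long base  = 0xffff880001000000UL;  /* stands in for pcpu_base_addr */
        unsigned long start = 0xffffffff81a00000UL;  /* stands in for __per_cpu_start */
        unsigned long addr  = base + 0x100;          /* address inside the first chunk */

        unsigned long ptr  = addr - base + start;    /* __addr_to_pcpu_ptr: start + 0x100 */
        unsigned long back = ptr + base - start;     /* __pcpu_ptr_to_addr: recovers addr */
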
@@ -1065,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
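
With the allocator returning void __percpu *, a caller that casts the result
straight to a normal pointer now draws a sparse warning unless it goes through
the percpu accessors (or an explicit __force cast).  A sketch of what the
checker flags under sparse's address-space model (variable names hypothetical):

        int __percpu *val = __alloc_percpu(sizeof(int), __alignof__(int));
        int *leaked = (int *)val;           /* sparse: cast removes address space */
        int *ok     = per_cpu_ptr(val, 0);  /* accessor strips it legitimately */
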
@@ -1194,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, false);
 }
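
Callers that keep the annotation get the checking for free: the returned
pointer cannot be dereferenced directly and must go through per_cpu_ptr() or
the this_cpu accessors.  A hedged usage sketch (struct counters and stats are
hypothetical names, not from this patch):

        struct counters {
                u64 hits;
                u64 misses;
        };
        struct counters __percpu *stats;
        int cpu;

        stats = alloc_percpu(struct counters);  /* wraps __alloc_percpu() */
        if (!stats)
                return -ENOMEM;

        cpu = get_cpu();                         /* pin to a CPU */
        per_cpu_ptr(stats, cpu)->hits++;         /* explicit-CPU access */
        put_cpu();
        this_cpu_inc(stats->misses);             /* shorthand for the local CPU */
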
@@ -1215,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, true);
 }
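
The reserved pool exists so module static percpu variables can land in the
first chunk alongside the kernel's own static percpu area.  A simplified
sketch modeled loosely on kernel/module.c's percpu_modalloc() (the helper
name here is hypothetical and the real code does more):

        static void __percpu *module_percpu_alloc(unsigned long size,
                                                  unsigned long align)
        {
                if (align > PAGE_SIZE)
                        align = PAGE_SIZE;       /* cap overly strict alignment */
                return __alloc_reserved_percpu(size, align);
        }
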
@@ -1267,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
        void *addr;
        struct pcpu_chunk *chunk;
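
free_percpu() now takes the annotated pointer type, so handing it a plain
kernel pointer becomes a sparse warning rather than a silent bug.  It also
returns early on NULL (see the !ptr check in the body), which keeps teardown
paths simple.  A short sketch pairing with the allocation example above
(counters_exit is a hypothetical helper):

        static void counters_exit(struct counters __percpu *stats)
        {
                free_percpu(stats);              /* NULL is a safe no-op */
        }
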
@@ -1302,6 +1304,32 @@ void free_percpu(void *ptr)
 EXPORT_SYMBOL_GPL(free_percpu);
 
 /**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area.  Module
+ * static percpu areas are not considered.  For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+       const size_t static_size = __per_cpu_end - __per_cpu_start;
+       void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               void *start = per_cpu_ptr(base, cpu);
+
+               if ((void *)addr >= start && (void *)addr < start + static_size)
+                       return true;
+        }
+       return false;
+}
+
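
The intended consumer is code like lockdep's static_obj(), which must decide
whether an arbitrary address refers to static storage.  A hedged sketch,
simplified from that pattern (kernel/lockdep.c does additional arch checks):

        static int static_obj(void *obj)
        {
                unsigned long addr = (unsigned long)obj;

                if (addr >= (unsigned long)_stext && addr < (unsigned long)_end)
                        return 1;                /* core kernel image */
                if (is_kernel_percpu_address(addr))
                        return 1;                /* in-kernel static percpu */
                return is_module_address(addr)   /* module text/data */
                       || is_module_percpu_address(addr);
        }
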
+/**
  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
  * @addr: the address to be converted to physical address
  *