sched: Disable SD_PREFER_LOCAL for MC/CPU domains
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79e31e9..d823c24 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -44,9 +44,6 @@
 
 #ifdef CONFIG_X86_32
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t node_to_cpumask_map[];
-
 /* Mappings between logical cpu number and node number */
 extern int cpu_to_node_map[];
 
@@ -57,33 +54,18 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu) cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-       return node_to_cpumask_map[node];
-}
-
 #else /* CONFIG_X86_64 */
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t *node_to_cpumask_map;
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
 /* Returns the number of the current Node. */
-#define numa_node_id()         read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id()         percpu_read(node_number)
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
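A minimal sketch of the per-CPU node id that this hunk switches numa_node_id() to, assuming the definition lives in arch/x86 setup code; the helper names set_cpu_node() and alloc_on_local_node() are made up for illustration, while DEFINE_PER_CPU(), per_cpu(), kmalloc_node() and numa_node_id() are existing kernel API:

    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    /* definition side (sketch): the variable that percpu_read(node_number)
     * above resolves to; arch setup code fills it in for each CPU */
    DEFINE_PER_CPU(int, node_number) = 0;

    /* hypothetical helper: record which node an onlined CPU sits on */
    static void set_cpu_node(int cpu, int node)
    {
            per_cpu(node_number, cpu) = node;
    }

    /* consumer side: numa_node_id() is now a cheap per-CPU read,
     * not a PDA access */
    static void *alloc_on_local_node(size_t size)
    {
            return kmalloc_node(size, GFP_KERNEL, numa_node_id());
    }
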
@@ -96,34 +78,27 @@ static inline int cpu_to_node(int cpu)
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
-{
-       return &node_to_cpumask_map[node];
-}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
+#endif /* CONFIG_X86_64 */
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
 {
        return node_to_cpumask_map[node];
 }
+#endif
 
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
-
-/* Replace default node_to_cpumask_ptr with optimized version */
-#define node_to_cpumask_ptr(v, node)           \
-               const cpumask_t *v = _node_to_cpumask_ptr(node)
-
-#define node_to_cpumask_ptr_next(v, node)      \
-                          v = _node_to_cpumask_ptr(node)
-
-#endif /* CONFIG_X86_64 */
+extern void setup_node_to_cpumask_map(void);
 
 /*
  * Returns the number of the node containing Node 'node'. This
@@ -132,7 +107,6 @@ static inline cpumask_t node_to_cpumask(int node)
 #define parent_node(node) (node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
-#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
 #ifdef CONFIG_X86_32
 extern unsigned long node_start_pfn[];
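Caller-side sketch of the node_to_cpumask() -> cpumask_of_node() conversion the hunk above enables; cpus_on_node() is a made-up name, cpumask_of_node() and cpumask_weight() are existing kernel API:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* old style copied a whole cpumask_t onto the stack:
     *         cpumask_t mask = node_to_cpumask(node);
     *         n = cpus_weight(mask);
     * new style works on the shared mask through a pointer */
    static int cpus_on_node(int node)
    {
            return cpumask_weight(cpumask_of_node(node));
    }
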
@@ -142,38 +116,42 @@ extern unsigned long node_remap_size[];
 
 # define SD_CACHE_NICE_TRIES   1
 # define SD_IDLE_IDX           1
-# define SD_NEWIDLE_IDX                2
-# define SD_FORKEXEC_IDX       0
 
 #else
 
 # define SD_CACHE_NICE_TRIES   2
 # define SD_IDLE_IDX           2
-# define SD_NEWIDLE_IDX                2
-# define SD_FORKEXEC_IDX       1
 
 #endif
 
 /* sched_domains SD_NODE_INIT for NUMA machines */
-#define SD_NODE_INIT (struct sched_domain) {           \
-       .min_interval           = 8,                    \
-       .max_interval           = 32,                   \
-       .busy_factor            = 32,                   \
-       .imbalance_pct          = 125,                  \
-       .cache_nice_tries       = SD_CACHE_NICE_TRIES,  \
-       .busy_idx               = 3,                    \
-       .idle_idx               = SD_IDLE_IDX,          \
-       .newidle_idx            = SD_NEWIDLE_IDX,       \
-       .wake_idx               = 1,                    \
-       .forkexec_idx           = SD_FORKEXEC_IDX,      \
-       .flags                  = SD_LOAD_BALANCE       \
-                               | SD_BALANCE_EXEC       \
-                               | SD_BALANCE_FORK       \
-                               | SD_WAKE_AFFINE        \
-                               | SD_WAKE_BALANCE       \
-                               | SD_SERIALIZE,         \
-       .last_balance           = jiffies,              \
-       .balance_interval       = 1,                    \
+#define SD_NODE_INIT (struct sched_domain) {                           \
+       .min_interval           = 8,                                    \
+       .max_interval           = 32,                                   \
+       .busy_factor            = 32,                                   \
+       .imbalance_pct          = 125,                                  \
+       .cache_nice_tries       = SD_CACHE_NICE_TRIES,                  \
+       .busy_idx               = 3,                                    \
+       .idle_idx               = SD_IDLE_IDX,                          \
+       .newidle_idx            = 0,                                    \
+       .wake_idx               = 0,                                    \
+       .forkexec_idx           = 0,                                    \
+                                                                       \
+       .flags                  = 1*SD_LOAD_BALANCE                     \
+                               | 1*SD_BALANCE_NEWIDLE                  \
+                               | 1*SD_BALANCE_EXEC                     \
+                               | 1*SD_BALANCE_FORK                     \
+                               | 0*SD_BALANCE_WAKE                     \
+                               | 1*SD_WAKE_AFFINE                      \
+                               | 1*SD_PREFER_LOCAL                     \
+                               | 0*SD_SHARE_CPUPOWER                   \
+                               | 0*SD_POWERSAVINGS_BALANCE             \
+                               | 0*SD_SHARE_PKG_RESOURCES              \
+                               | 1*SD_SERIALIZE                        \
+                               | 0*SD_PREFER_SIBLING                   \
+                               ,                                       \
+       .last_balance           = jiffies,                              \
+       .balance_interval       = 1,                                    \
 }
 
 #ifdef CONFIG_X86_64_ACPI_NUMA
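The rewritten SD_NODE_INIT uses the 1*FLAG / 0*FLAG table style: every known flag stays visible in the initializer, 0*FLAG ORs in nothing, and toggling a domain property is a one-character edit. A reduced sketch of the idiom with a made-up flags word (the SD_* constants come from <linux/sched.h>; the SD_PREFER_LOCAL disable named in the title applies to the MC/CPU domain initializers, which are outside this file):

    #include <linux/sched.h>

    /* sketch: only the 1* entries end up set in the resulting word */
    static const unsigned int example_flags = 1*SD_LOAD_BALANCE
                                            | 1*SD_WAKE_AFFINE
                                            | 0*SD_PREFER_LOCAL   /* off */
                                            | 1*SD_SERIALIZE;
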
@@ -183,51 +161,29 @@ extern int __node_distance(int, int);
 
 #else /* !CONFIG_NUMA */
 
-#define numa_node_id()         0
-#define        cpu_to_node(cpu)        0
-#define        early_cpu_to_node(cpu)  0
-
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
-{
-       return &cpu_online_map;
-}
-static inline cpumask_t node_to_cpumask(int node)
+static inline int numa_node_id(void)
 {
-       return cpu_online_map;
+       return 0;
 }
-static inline int node_to_first_cpu(int node)
+
+static inline int early_cpu_to_node(int cpu)
 {
-       return first_cpu(cpu_online_map);
+       return 0;
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
-#define node_to_cpumask_ptr(v, node)           \
-               const cpumask_t *v = _node_to_cpumask_ptr(node)
+static inline void setup_node_to_cpumask_map(void) { }
 
-#define node_to_cpumask_ptr_next(v, node)      \
-                          v = _node_to_cpumask_ptr(node)
 #endif
 
 #include <asm-generic/topology.h>
 
-#ifdef CONFIG_NUMA
-/* Returns the number of the first CPU on Node 'node'. */
-static inline int node_to_first_cpu(int node)
-{
-       node_to_cpumask_ptr(mask, node);
-       return first_cpu(*mask);
-}
-#endif
-
-extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
-#define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)             (&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)           (per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers                yes
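The topology accessors in this hunk now evaluate to struct cpumask pointers rather than cpumask_t values, so they can be passed straight to the cpumask_* helpers. A made-up debug helper as a usage sketch:

    #include <linux/kernel.h>
    #include <linux/cpumask.h>
    #include <asm/topology.h>

    /* hypothetical: print sibling counts for one CPU */
    static void dump_siblings(int cpu)
    {
            pr_info("cpu %d: %d thread sibling(s), %d core sibling(s)\n",
                    cpu,
                    cpumask_weight(topology_thread_cpumask(cpu)),
                    cpumask_weight(topology_core_cpumask(cpu)));
    }
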
@@ -238,10 +194,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()   (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable()   ((boot_cpu_data.x86_max_cores > 1) && \
+                       (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
 #define smt_capable()                  (smp_num_siblings > 1)
 #endif
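
A sketch of how the tightened capability macros read from C code; report_topology() is a made-up name, mc_capable() and smt_capable() are the macros defined above (a single-core, non-HT package now reports neither capability):

    #include <linux/kernel.h>
    #include <linux/smp.h>
    #include <asm/topology.h>

    /* hypothetical probe of what this box advertises */
    static void report_topology(void)
    {
            pr_info("multi-core: %s, SMT: %s\n",
                    mc_capable() ? "yes" : "no",
                    smt_capable() ? "yes" : "no");
    }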