Merge branch 'x86/core' into core/ipi
author		Ingo Molnar <mingo@elte.hu>
		Fri, 13 Mar 2009 10:05:58 +0000 (11:05 +0100)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 13 Mar 2009 10:05:58 +0000 (11:05 +0100)
Makefile
arch/sparc/kernel/irq_64.c
include/linux/sched.h
include/linux/smp.h
kernel/sched.c
kernel/softirq.c

diff --combined Makefile
+++ b/Makefile
@@@ -533,8 -533,9 +533,9 @@@ KBUILD_CFLAGS += $(call cc-option,-Wfra
  endif
  
  # Force gcc to behave correctly even for buggy distributions
- # Arch Makefiles may override this setting
+ ifndef CONFIG_CC_STACKPROTECTOR
  KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+ endif
  
  ifdef CONFIG_FRAME_POINTER
  KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@@ -904,18 -905,12 +905,18 @@@ localver = $(subst $(space),, $(string
  # and if the SCM is known, a tag from the SCM is appended.
  # The appended tag is determined by the SCM used.
  #
 -# Currently, only git is supported.
 -# Other SCMs can edit scripts/setlocalversion and add the appropriate
 -# checks as needed.
 +# .scmversion is used when generating rpm packages so we do not lose
 +# the version information from the SCM when we build the kernel
 +# from the copied source
  ifdef CONFIG_LOCALVERSION_AUTO
 -      _localver-auto = $(shell $(CONFIG_SHELL) \
 -                        $(srctree)/scripts/setlocalversion $(srctree))
 +
 +ifeq ($(wildcard .scmversion),)
 +        _localver-auto = $(shell $(CONFIG_SHELL) \
 +                         $(srctree)/scripts/setlocalversion $(srctree))
 +else
 +        _localver-auto = $(shell cat .scmversion 2> /dev/null)
 +endif
 +
        localver-auto  = $(LOCALVERSION)$(_localver-auto)
  endif
  
@@@ -1543,7 -1538,7 +1544,7 @@@ quiet_cmd_depmod = DEPMOD  $(KERNELRELE
        cmd_depmod = \
        if [ -r System.map -a -x $(DEPMOD) ]; then                              \
                $(DEPMOD) -ae -F System.map                                     \
 -              $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) -r)   \
 +              $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) )     \
                $(KERNELRELEASE);                                               \
        fi
  
diff --combined arch/sparc/kernel/irq_64.c
@@@ -252,9 -252,10 +252,10 @@@ struct irq_handler_data 
  #ifdef CONFIG_SMP
  static int irq_choose_cpu(unsigned int virt_irq)
  {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
  
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@@ -323,25 -324,17 +324,25 @@@ static void sun4u_set_affinity(unsigne
        sun4u_irq_enable(virt_irq);
  }
  
 +/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 + * handler_irq() will skip the handler call and that will leave the
 + * interrupt in the sent state.  The next ->enable() call will hit the
 + * ICLR register to reset the state machine.
 + *
 + * This scheme is necessary, instead of clearing the Valid bit in the
 + * IMAP register, to handle the case of IMAP registers being shared by
 + * multiple INOs (and thus ICLR registers).  Since we use a different
 + * virtual IRQ for each shared IMAP instance, the generic code thinks
 + * there is only one user so it prematurely calls ->disable() on
 + * free_irq().
 + *
 + * We have to provide an explicit ->disable() method instead of using
 + * NULL to get the default.  The reason is that if the generic code
 + * sees that, it also hooks up a default ->shutdown method which
 + * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 + */
  static void sun4u_irq_disable(unsigned int virt_irq)
  {
 -      struct irq_handler_data *data = get_irq_chip_data(virt_irq);
 -
 -      if (likely(data)) {
 -              unsigned long imap = data->imap;
 -              unsigned long tmp = upa_readq(imap);
 -
 -              tmp &= ~IMAP_VALID;
 -              upa_writeq(tmp, imap);
 -      }
  }
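The now-empty ->disable() above is only half of the scheme the comment describes; the state machine is actually reset on the next ->enable(). A condensed sketch of that path, assuming the imap/iclr pair in struct irq_handler_data and the upa_readq()/upa_writeq() accessors used elsewhere in this file (the real enable path also recomputes the CPU routing TID bits in IMAP, elided here):

	static void sun4u_irq_enable(unsigned int virt_irq)
	{
		struct irq_handler_data *data = get_irq_chip_data(virt_irq);

		if (likely(data)) {
			unsigned long val = upa_readq(data->imap);

			/* Keep the mapping valid; TID routing updates elided. */
			upa_writeq(val | IMAP_VALID, data->imap);

			/* Writing IDLE to ICLR resets the per-INO interrupt
			 * state machine, releasing any interrupt left in the
			 * "sent" state by a handler call that was skipped
			 * while the descriptor was IRQ_DISABLED.
			 */
			upa_writeq(ICLR_IDLE, data->iclr);
		}
	}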
  
  static void sun4u_irq_eoi(unsigned int virt_irq)
@@@ -754,8 -747,7 +755,8 @@@ void handler_irq(int irq, struct pt_reg
  
                desc = irq_desc + virt_irq;
  
 -              desc->handle_irq(virt_irq, desc);
 +              if (!(desc->status & IRQ_DISABLED))
 +                      desc->handle_irq(virt_irq, desc);
  
                bucket_pa = next_pa;
        }
@@@ -805,7 -797,7 +806,7 @@@ void fixup_irqs(void
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
diff --combined include/linux/sched.h
@@@ -1178,10 -1178,9 +1178,9 @@@ struct task_struct 
        pid_t pid;
        pid_t tgid;
  
- #ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
- #endif
        /* 
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with 
  #endif
  };
  
 +/* Future-safe accessor for struct task_struct's cpus_allowed. */
 +#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
 +
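Going through the accessor instead of open-coding &tsk->cpus_allowed keeps callers working if the mask's representation changes later. A minimal usage sketch (the wrapper function is illustrative, not from this patch):

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	/* True if @p is pinned to exactly @cpu. */
	static int pinned_to(struct task_struct *p, int cpu)
	{
		return cpumask_equal(tsk_cpumask(p), cpumask_of(cpu));
	}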
  /*
   * Priority of a process goes from 0..MAX_PRIO-1, valid RT
   * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@@ -2090,6 -2086,19 +2089,19 @@@ static inline int object_is_on_stack(vo
  
  extern void thread_info_cache_init(void);
  
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+ static inline unsigned long stack_not_used(struct task_struct *p)
+ {
+       unsigned long *n = end_of_stack(p);
+       do {    /* Skip over canary */
+               n++;
+       } while (!*n);
+       return (unsigned long)n - (unsigned long)end_of_stack(p);
+ }
+ #endif
+ 
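stack_not_used() steps up from end_of_stack(), past the canary word and any still-zero (never-written) words, and returns the byte count of untouched stack. A minimal usage sketch (the reporting helper is illustrative; sched_show_task() below is the real caller):

	#include <linux/sched.h>
	#include <linux/kernel.h>

	#ifdef CONFIG_DEBUG_STACK_USAGE
	static void report_stack_headroom(struct task_struct *p)
	{
		/* Bytes at the far end of the stack never written by @p. */
		unsigned long free = stack_not_used(p);

		printk(KERN_DEBUG "%s/%d: %lu bytes of stack never used\n",
		       p->comm, task_pid_nr(p), free);
	}
	#endif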
  /* set thread flags in other task's structures
   * - see asm/thread_info.h for TIF_xxxx flags available
   */
diff --combined include/linux/smp.h
@@@ -82,8 -82,7 +82,8 @@@ smp_call_function_mask(cpumask_t mask, 
        return 0;
  }
  
 -void __smp_call_function_single(int cpuid, struct call_single_data *data);
 +void __smp_call_function_single(int cpuid, struct call_single_data *data,
 +                              int wait);
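The extra wait argument lets callers choose between fire-and-forget and blocking until the remote function has run, instead of encoding that in data->flags. A minimal sketch, assuming a caller-provided callback (my_func and kick_cpu() are illustrative); note that with wait == 0 the call_single_data must stay valid until the IPI completes, so it needs long-lived storage:

	#include <linux/smp.h>
	#include <linux/kernel.h>

	static void my_func(void *info)
	{
		/* Runs on the target CPU in IPI context. */
		pr_info("kicked cpu %d\n", smp_processor_id());
	}

	static struct call_single_data my_csd = {
		.func	= my_func,
	};

	static void kick_cpu(int cpu)
	{
		/* wait == 0: fire-and-forget, as the hrtick and
		 * remote-softirq callers in this merge use it;
		 * pass 1 to block until completion instead. */
		__smp_call_function_single(cpu, &my_csd, 0);
	}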
  
  /*
   * Generic and arch helpers
@@@ -177,6 -176,12 +177,12 @@@ static inline void init_call_single_dat
  #define put_cpu()             preempt_enable()
  #define put_cpu_no_resched()  preempt_enable_no_resched()
  
+ /*
+  * Callback to arch code if there's nosmp or maxcpus=0 on the
+  * boot command line:
+  */
+ extern void arch_disable_smp_support(void);
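x86 overrides this hook to back out APIC/IO-APIC setup when SMP is disabled on the command line; other architectures fall through to an empty weak default. A sketch of the expected pattern next to the nosmp parameter handling (the placement and the empty default are an assumption here, not quoted from this merge):

	#include <linux/init.h>
	#include <linux/smp.h>

	/* Empty weak default; an arch overrides it to undo SMP-only setup. */
	void __weak arch_disable_smp_support(void) { }

	static int __init nosmp(char *str)
	{
		setup_max_cpus = 0;		/* boot only the boot CPU */
		arch_disable_smp_support();
		return 0;
	}
	early_param("nosmp", nosmp);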
  void smp_setup_processor_id(void);
  
  #endif /* __LINUX_SMP_H */
diff --combined kernel/sched.c
@@@ -1093,7 -1093,7 +1093,7 @@@ static void hrtick_start(struct rq *rq
        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
 -              __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
 +              __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
  }
@@@ -5944,12 -5944,7 +5944,7 @@@ void sched_show_task(struct task_struc
                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  #endif
  #ifdef CONFIG_DEBUG_STACK_USAGE
-       {
-               unsigned long *n = end_of_stack(p);
-               while (!*n)
-                       n++;
-               free = (unsigned long)n - (unsigned long)end_of_stack(p);
-       }
+       free = stack_not_used(p);
  #endif
        printk(KERN_CONT "%5lu %5d %6d\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent));
@@@ -9490,7 -9485,7 +9485,7 @@@ cpuacct_destroy(struct cgroup_subsys *s
  
  static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
  {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;
  
  #ifndef CONFIG_64BIT
  
  static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
  {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  
  #ifndef CONFIG_64BIT
        /*
@@@ -9605,7 -9600,7 +9600,7 @@@ static void cpuacct_charge(struct task_
        ca = task_ca(tsk);
  
        for (; ca; ca = ca->parent) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+               u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;
        }
  }
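The percpu_ptr() -> per_cpu_ptr() switch is a rename to the canonical accessor for dynamically allocated per-cpu memory such as ca->cpuusage. A minimal sketch of the allocate/access pairing (the struct and function names are illustrative):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct my_acct {
		u64 *cpuusage;			/* one u64 per possible CPU */
	};

	static int my_acct_init(struct my_acct *a)
	{
		a->cpuusage = alloc_percpu(u64);
		return a->cpuusage ? 0 : -ENOMEM;
	}

	static u64 my_acct_total(struct my_acct *a)
	{
		u64 sum = 0;
		int cpu;

		/* per_cpu_ptr() resolves @cpu's copy of the allocation. */
		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(a->cpuusage, cpu);
		return sum;
	}

On 32-bit, a raw u64 load like this is not atomic, which is why the cpuacct code above brackets its reads and writes with the runqueue lock under #ifndef CONFIG_64BIT.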
diff --combined kernel/softirq.c
@@@ -496,7 -496,7 +496,7 @@@ static int __try_remote_softirq(struct 
                cp->flags = 0;
                cp->priv = softirq;
  
 -              __smp_call_function_single(cpu, cp);
 +              __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
@@@ -796,6 -796,11 +796,11 @@@ int __init __weak early_irq_init(void
        return 0;
  }
  
+ int __init __weak arch_probe_nr_irqs(void)
+ {
+       return 0;
+ }
+ 
  int __init __weak arch_early_irq_init(void)
  {
        return 0;