Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 31 Oct 2009 19:15:28 +0000 (12:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 31 Oct 2009 19:15:28 +0000 (12:15 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6:
  Documentation: ABI: /sys/devices/system/cpu/cpu#/node
  Documentation: ABI: /sys/devices/system/cpu/cpuidle/
  Documentation: ABI: /sys/devices/system/cpu/sched_[mc|smt]_power_savings
  Documentation: ABI: /sys/devices/system/cpu/cpu#/ topology files
  Documentation: ABI: /sys/devices/system/cpu/ topology files
  Documentation: ABI: document /sys/devices/system/cpu/
  Documentation: ABI: rename sysfs-devices-cache_disable properly
  Driver core: allow certain drivers to prohibit bind/unbind via sysfs
  Driver core: fix driver_register() return value

25 files changed:
arch/s390/include/asm/cputime.h
arch/s390/kernel/ipl.c
arch/s390/kernel/smp.c
arch/s390/kernel/swsusp_asm64.S
crypto/async_tx/async_pq.c
crypto/async_tx/async_raid6_recov.c
crypto/async_tx/async_xor.c
drivers/md/Makefile
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/md/raid6altivec.uc
drivers/md/raid6int.uc
drivers/md/raid6test/Makefile
drivers/md/unroll.awk [new file with mode: 0644]
drivers/md/unroll.pl [deleted file]
drivers/s390/char/sclp_async.c
drivers/s390/net/smsgiucv.c
drivers/scsi/dpt_i2o.c
fs/xfs/linux-2.6/xfs_quotaops.c
fs/xfs/xfs_ialloc.c
mm/nommu.c

diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24b1244..f23961a 100644
@@ -78,7 +78,7 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-       return __div(cputime, 4096000);
+       return cputime_div(cputime, 4096000);
 }
 
 static inline cputime_t
@@ -160,7 +160,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-       return __div(cputime, 4096000000ULL / USER_HZ);
+       return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
@@ -175,7 +175,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-       return __div(cputime, 4096000000ULL / USER_HZ);
+       return cputime_div(cputime, 4096000000ULL / USER_HZ);
 }
 
 struct s390_idle_data {
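
Note on the constants above: s390 accounts CPU time in CPU-timer units of
2^-12 microseconds, i.e. 4096 units per microsecond, which is where the
divisors come from:

	msecs       = cputime / 4096000                 (4096 * 1000 units per ms)
	clock ticks = cputime / (4096000000 / USER_HZ)  (4096 * 10^6 units per second)

cputime_div() is presumably a division helper replacing the old __div();
the likely motivation is handling the 64-bit dividend safely on 31-bit
builds as well.
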
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ee57a42..4890ac6 100644
@@ -1595,10 +1595,9 @@ static void stop_run(struct shutdown_trigger *trigger)
 {
        if (strcmp(trigger->name, ON_PANIC_STR) == 0)
                disabled_wait((unsigned long) __builtin_return_address(0));
-       else {
-               signal_processor(smp_processor_id(), sigp_stop);
-               for (;;);
-       }
+       while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+               cpu_relax();
+       for (;;);
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c932caa..93e5203 100644
@@ -76,7 +76,6 @@ static int cpu_stopped(int cpu)
        __u32 status;
 
        switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
-       case sigp_order_code_accepted:
        case sigp_status_stored:
                /* Check for stopped and check stop state */
                if (status & 0x50)
@@ -638,6 +637,8 @@ void __cpu_die(unsigned int cpu)
        /* Wait until target cpu is down */
        while (!cpu_stopped(cpu))
                cpu_relax();
+       while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+               udelay(10);
        smp_free_lowcore(cpu);
        pr_info("Processor %d stopped\n", cpu);
 }
@@ -645,8 +646,8 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
        idle_task_exit();
-       signal_processor(smp_processor_id(), sigp_stop);
-       BUG();
+       while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+               cpu_relax();
        for (;;);
 }
 
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7c8653e..0c26cc1 100644
@@ -199,6 +199,7 @@ pgm_check_entry:
        brc     2,4b                    /* busy, try again */
 5:
        sigp    %r9,%r2,__SIGP_STOP     /* stop resume (current) CPU */
+       brc     2,5b                    /* busy, try again */
 6:     j       6b
 
 restart_suspend:
@@ -206,6 +207,7 @@ restart_suspend:
        llgh    %r2,0(%r1)
 7:
        sigp    %r9,%r2,__SIGP_SENSE    /* Wait for resume CPU */
+       brc     8,7b                    /* accepted, status 0, still running */
        brc     2,7b                    /* busy, try again */
        tmll    %r9,0x40                /* Test if resume CPU is stopped */
        jz      7b
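
The ipl.c, smp.c and swsusp_asm64.S hunks above all fix the same class of
bug: a SIGP order can come back with condition "busy", so the sender must
retry instead of assuming the first attempt took effect (brc 2,Nb is the
assembler form of the same retry). A minimal sketch of the C idiom, using
only the functions visible in the hunks:

	/* illustrative helper, not part of the patch */
	static void sigp_retry(int cpu, int order)
	{
		while (signal_processor(cpu, order) == sigp_busy)
			cpu_relax();
	}
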
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d..6b5cc4f 100644
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-       return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
         * sources and update the coefficients accordingly
         */
        for (i = 0, idx = 0; i < src_cnt; i++) {
-               if (is_raid6_zero_block(blocks[i]))
+               if (blocks[i] == NULL)
                        continue;
                dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
                                            DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                srcs = (void **) blocks;
 
        for (i = 0; i < disks; i++) {
-               if (is_raid6_zero_block(blocks[i])) {
+               if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
-                       srcs[i] = blocks[i];
+                       srcs[i] = (void *)raid6_empty_zero_page;
                } else
                        srcs[i] = page_address(blocks[i]) + offset;
        }
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL, those buffers will be replaced with the
+ * raid6_empty_zero_page in the synchronous path and omitted in the
+ * hardware-asynchronous path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
        async_tx_quiesce(&submit->depend_tx);
 
        if (!P(blocks, disks)) {
-               P(blocks, disks) = scribble;
+               P(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        if (!Q(blocks, disks)) {
-               Q(blocks, disks) = scribble;
+               Q(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                                                      len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
+       unsigned char coefs[disks-2];
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        dma_addr_t *dma_src = NULL;
+       int src_cnt = 0;
 
        BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                         __func__, disks, len);
                if (!P(blocks, disks))
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
+               else
+                       pq[0] = dma_map_page(dev, P(blocks, disks),
+                                            offset, len,
+                                            DMA_TO_DEVICE);
                if (!Q(blocks, disks))
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+               else
+                       pq[1] = dma_map_page(dev, Q(blocks, disks),
+                                            offset, len,
+                                            DMA_TO_DEVICE);
+
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
-               for (i = 0; i < disks; i++)
+               for (i = 0; i < disks-2; i++)
                        if (likely(blocks[i])) {
-                               BUG_ON(is_raid6_zero_block(blocks[i]));
-                               dma_src[i] = dma_map_page(dev, blocks[i],
-                                                         offset, len,
-                                                         DMA_TO_DEVICE);
+                               dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+                                                               offset, len,
+                                                               DMA_TO_DEVICE);
+                               coefs[src_cnt] = raid6_gfexp[i];
+                               src_cnt++;
                        }
 
                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-                                                           disks - 2,
-                                                           raid6_gfexp,
+                                                           src_cnt,
+                                                           coefs,
                                                            len, pqres,
                                                            dma_flags);
                        if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-       scribble = alloc_page(GFP_KERNEL);
+       pq_scribble_page = alloc_page(GFP_KERNEL);
 
-       if (scribble)
+       if (pq_scribble_page)
                return 0;
 
        pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-       put_page(scribble);
+       put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
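
To make the new NULL conventions concrete, a hedged usage sketch (disks,
len and scribble are assumed to be set up by the caller; error handling
omitted):

	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *blocks[disks];	/* data sources + P + Q */

	/* blocks[0..disks-3] are sources; a NULL entry means "all zeroes":
	 * substituted with raid6_empty_zero_page on the sync path, skipped
	 * entirely on the dma path */
	blocks[2] = NULL;
	/* a NULL P or Q destination means "don't compute it"; the sync
	 * path then requires len + offset <= PAGE_SIZE for its spare page */
	Q(blocks, disks) = NULL;
	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, disks, len, &submit);
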
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde..943f2ab 100644
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-             struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+               struct page **blocks, struct async_submit_ctl *submit)
 {
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
 
-       p = blocks[4-2];
-       q = blocks[4-1];
+       p = blocks[disks-2];
+       q = blocks[disks-1];
 
        a = blocks[faila];
        b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-             struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+               struct page **blocks, struct async_submit_ctl *submit)
 {
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
-       int uninitialized_var(good);
-       int i;
+       int good_srcs, good, i;
 
-       for (i = 0; i < 3; i++) {
+       good_srcs = 0;
+       good = -1;
+       for (i = 0; i < disks-2; i++) {
+               if (blocks[i] == NULL)
+                       continue;
                if (i == faila || i == failb)
                        continue;
-               else {
-                       good = i;
-                       break;
-               }
+               good = i;
+               good_srcs++;
        }
-       BUG_ON(i >= 3);
+       BUG_ON(good_srcs > 1);
 
-       p = blocks[5-2];
-       q = blocks[5-1];
+       p = blocks[disks-2];
+       q = blocks[disks-1];
        g = blocks[good];
 
        /* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
         * delta p and delta q
         */
        dp = blocks[faila];
-       blocks[faila] = (void *)raid6_empty_zero_page;
+       blocks[faila] = NULL;
        blocks[disks-2] = dp;
        dq = blocks[failb];
-       blocks[failb] = (void *)raid6_empty_zero_page;
+       blocks[failb] = NULL;
        blocks[disks-1] = dq;
 
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                        struct page **blocks, struct async_submit_ctl *submit)
 {
+       int non_zero_srcs, i;
+
        BUG_ON(faila == failb);
        if (failb < faila)
                swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
         */
        if (!submit->scribble) {
                void **ptrs = (void **) blocks;
-               int i;
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
-                       ptrs[i] = page_address(blocks[i]);
+                       if (blocks[i] == NULL)
+                               ptrs[i] = (void *) raid6_empty_zero_page;
+                       else
+                               ptrs[i] = page_address(blocks[i]);
 
                raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                return NULL;
        }
 
-       switch (disks) {
-       case 4:
+       non_zero_srcs = 0;
+       for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+               if (blocks[i])
+                       non_zero_srcs++;
+       switch (non_zero_srcs) {
+       case 0:
+       case 1:
+               /* There must be at least 2 sources - the failed devices. */
+               BUG();
+
+       case 2:
                /* dma devices do not uniformly understand a zero source pq
                 * operation (in contrast to the synchronous case), so
-                * explicitly handle the 4 disk special case
+                * explicitly handle the special case of a 4 disk array with
+                * both data disks missing.
                 */
-               return __2data_recov_4(bytes, faila, failb, blocks, submit);
-       case 5:
+               return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+       case 3:
                /* dma devices do not uniformly understand a single
                 * source pq operation (in contrast to the synchronous
-                * case), so explicitly handle the 5 disk special case
+                * case), so explicitly handle the special case of a 5 disk
+                * array with 2 of 3 data disks missing.
                 */
-               return __2data_recov_5(bytes, faila, failb, blocks, submit);
+               return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
        default:
                return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
        }
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
+       int good_srcs, good, i;
        struct page *srcs[2];
 
        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
         */
        if (!scribble) {
                void **ptrs = (void **) blocks;
-               int i;
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
-                       ptrs[i] = page_address(blocks[i]);
+                       if (blocks[i] == NULL)
+                       ptrs[i] = (void *)raid6_empty_zero_page;
+                       else
+                               ptrs[i] = page_address(blocks[i]);
 
                raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
                return NULL;
        }
 
+       good_srcs = 0;
+       good = -1;
+       for (i = 0; i < disks-2; i++) {
+               if (i == faila)
+                       continue;
+               if (blocks[i]) {
+                       good = i;
+                       good_srcs++;
+                       if (good_srcs > 1)
+                               break;
+               }
+       }
+       BUG_ON(good_srcs == 0);
+
        p = blocks[disks-2];
        q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
         * Use the dead data page as temporary storage for delta q
         */
        dq = blocks[faila];
-       blocks[faila] = (void *)raid6_empty_zero_page;
+       blocks[faila] = NULL;
        blocks[disks-1] = dq;
 
-       /* in the 4 disk case we only need to perform a single source
-        * multiplication
+       /* in the 4-disk case we only need to perform a single source
+        * multiplication with the one good data block.
         */
-       if (disks == 4) {
-               int good = faila == 0 ? 1 : 0;
+       if (good_srcs == 1) {
                struct page *g = blocks[good];
 
                init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
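
Note on the dispatch above: non_zero_srcs counts the non-NULL data blocks,
and the two failed blocks count too, since their pages are still present
as reconstruction targets. Two non-NULL blocks maps to the old 4-disk
special case and three to the old 5-disk case, so e.g. a 6-disk array
whose layout leaves one data slot NULL is recovered through the 5-disk
path rather than the general one.
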
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a90..79182dc 100644
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
        void *cb_param_orig = submit->cb_param;
        enum async_tx_flags flags_orig = submit->flags;
        enum dma_ctrl_flags dma_flags;
-       int xor_src_cnt;
+       int xor_src_cnt = 0;
        dma_addr_t dma_dest;
 
        /* map the dest bidirectional in case it is re-used as a source */
        dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
        for (i = 0; i < src_cnt; i++) {
                /* only map the dest once */
+               if (!src_list[i])
+                       continue;
                if (unlikely(src_list[i] == dest)) {
-                       dma_src[i] = dma_dest;
+                       dma_src[xor_src_cnt++] = dma_dest;
                        continue;
                }
-               dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-                                         len, DMA_TO_DEVICE);
+               dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+                                                     len, DMA_TO_DEVICE);
        }
+       src_cnt = xor_src_cnt;
 
        while (src_cnt) {
                submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
            int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
        int i;
-       int xor_src_cnt;
+       int xor_src_cnt = 0;
        int src_off = 0;
        void *dest_buf;
        void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
        /* convert to buffer pointers */
        for (i = 0; i < src_cnt; i++)
-               srcs[i] = page_address(src_list[i]) + offset;
-
+               if (src_list[i])
+                       srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+       src_cnt = xor_src_cnt;
        /* set destination address */
        dest_buf = page_address(dest) + offset;
 
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1dc4185..e355e7f 100644
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE)        += dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)          += dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c:  UNROLL := 16
-$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c:  UNROLL := 32
-$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c:   UNROLL := 1
-$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c:   UNROLL := 2
-$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c:   UNROLL := 4
-$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c:   UNROLL := 8
-$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE   $@
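
For a concrete expansion of the new rule: building raid6int4.c now runs,
in effect,

	awk -f drivers/md/unroll.awk -vN=4 < drivers/md/raid6int.uc > raid6int4.c

(with $(AWK), $(srctree) and $(src) expanded by make), which drops the
build-time dependency on perl.
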
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6986b00..60e2b32 100644
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
        bitmap->offset = mddev->bitmap_offset;
        if (file) {
                get_file(file);
-               do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-                                     SYNC_FILE_RANGE_WAIT_BEFORE |
-                                     SYNC_FILE_RANGE_WRITE |
-                                     SYNC_FILE_RANGE_WAIT_AFTER);
+               /* As future accesses to this file will use bmap,
+                * and bypass the page cache, we must sync the file
+                * first.
+                */
+               vfs_fsync(file, file->f_dentry, 1);
        }
        /* read superblock from bitmap file (this sets bitmap->chunksize) */
        err = bitmap_read_sb(bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 26ba42a..10eb1fc 100644
@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
-               } else if (rdev->raid_disk >= mddev->raid_disks) {
+               } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
                        rdev->raid_disk = -1;
                        clear_bit(In_sync, &rdev->flags);
                }
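
Worked example for the bound above: while a reshape that shrinks the array
is in progress, mddev->raid_disks already holds the new size and
delta_disks is negative, so raid_disks - min(0, delta_disks) equals
raid_disks + |delta_disks|, i.e. the old size. With raid_disks = 4 and
delta_disks = -2, members with raid_disk up to 5 therefore stay in-sync
instead of being kicked out mid-reshape.
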
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d1b9bd5..a053423 100644
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 
        /* allocate a r1bio with room for raid_disks entries in the bios array */
        r1_bio = kzalloc(size, gfp_flags);
-       if (!r1_bio)
+       if (!r1_bio && pi->mddev)
                unplug_slaves(pi->mddev);
 
        return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
                                generic_make_request(bio);
                        }
                }
+               cond_resched();
        }
        if (unplug)
                unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
        conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
        if (!conf->poolinfo)
                goto out_no_mem;
-       conf->poolinfo->mddev = mddev;
+       conf->poolinfo->mddev = NULL;
        conf->poolinfo->raid_disks = mddev->raid_disks;
        conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
                                          r1bio_pool_free,
                                          conf->poolinfo);
        if (!conf->r1bio_pool)
                goto out_no_mem;
+       conf->poolinfo->mddev = mddev;
 
        spin_lock_init(&conf->device_lock);
        mddev->queue->queue_lock = &conf->device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 51c4c5c..c2cb7b8 100644
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
        /* allocate a r10bio with room for raid_disks entries in the bios array */
        r10_bio = kzalloc(size, gfp_flags);
-       if (!r10_bio)
+       if (!r10_bio && conf->mddev)
                unplug_slaves(conf->mddev);
 
        return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
                                generic_make_request(bio);
                        }
                }
+               cond_resched();
        }
        if (unplug)
                unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
        if (!conf->tmppage)
                goto out_free_conf;
 
-       conf->mddev = mddev;
        conf->raid_disks = mddev->raid_disks;
        conf->near_copies = nc;
        conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
                goto out_free_conf;
        }
 
+       conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        mddev->queue->queue_lock = &conf->device_lock;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9482980..81abefc 100644
@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
 {
-       int slot;
+       int slot = *count;
 
+       if (sh->ddf_layout)
+               (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
-       slot = (*count)++;
+       if (!sh->ddf_layout)
+               (*count)++;
        return slot;
 }
 
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
        int i;
 
        for (i = 0; i < disks; i++)
-               srcs[i] = (void *)raid6_empty_zero_page;
+               srcs[i] = NULL;
 
        count = 0;
        i = d0_idx;
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
                srcs[slot] = sh->dev[i].page;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);
-       BUG_ON(count != syndrome_disks);
 
-       return count;
+       return syndrome_disks;
 }
 
 static struct dma_async_tx_descriptor *
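
Note on the two hunks above: they change where the syndrome-source slot
counter advances. With ddf_layout set, P and Q also consume slot numbers
as the rotation is walked, so data blocks keep their DDF slot positions
and the srcs[] array can be left with NULL holes; those holes are legal
precisely because async_gen_syndrome() now treats a NULL source as a zero
page. That is also why set_syndrome_sources() stops asserting
count == syndrome_disks and simply returns syndrome_disks.
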
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
         * slot number conversion for 'faila' and 'failb'
         */
        for (i = 0; i < disks ; i++)
-               blocks[i] = (void *)raid6_empty_zero_page;
+               blocks[i] = NULL;
        count = 0;
        i = d0_idx;
        do {
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                        failb = slot;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);
-       BUG_ON(count != syndrome_disks);
 
        BUG_ON(faila == failb);
        if (failb < faila)
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
-                       return async_gen_syndrome(blocks, 0, count+2,
+                       return async_gen_syndrome(blocks, 0, syndrome_disks+2,
                                                  STRIPE_SIZE, &submit);
                } else {
                        struct page *dest;
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
+#ifdef CONFIG_MULTICORE_RAID456
+static void async_run_ops(void *param, async_cookie_t cookie)
+{
+       struct stripe_head *sh = param;
+       unsigned long ops_request = sh->ops.request;
+
+       clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
+       wake_up(&sh->ops.wait_for_ops);
+
+       __raid_run_ops(sh, ops_request);
+       release_stripe(sh);
+}
+
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+{
+       /* since handle_stripe can be called outside of raid5d context
+        * we need to ensure sh->ops.request is de-staged before another
+        * request arrives
+        */
+       wait_event(sh->ops.wait_for_ops,
+                  !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
+       sh->ops.request = ops_request;
+
+       atomic_inc(&sh->count);
+       async_schedule(async_run_ops, sh);
+}
+#else
+#define raid_run_ops __raid_run_ops
+#endif
+
 static int grow_one_stripe(raid5_conf_t *conf)
 {
        struct stripe_head *sh;
+       int disks = max(conf->raid_disks, conf->previous_raid_disks);
        sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
        if (!sh)
                return 0;
-       memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+       memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
        sh->raid_conf = conf;
        spin_lock_init(&sh->lock);
+       #ifdef CONFIG_MULTICORE_RAID456
+       init_waitqueue_head(&sh->ops.wait_for_ops);
+       #endif
 
-       if (grow_buffers(sh, conf->raid_disks)) {
-               shrink_buffers(sh, conf->raid_disks);
+       if (grow_buffers(sh, disks)) {
+               shrink_buffers(sh, disks);
                kmem_cache_free(conf->slab_cache, sh);
                return 0;
        }
-       sh->disks = conf->raid_disks;
        /* we just created an active stripe so... */
        atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
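
Note on the multicore path above: raid_run_ops() stages exactly one
request at a time. STRIPE_OPS_REQ_PENDING acts as a bit lock protecting
sh->ops.request; the async worker copies the request, releases the bit
with clear_bit_unlock() and wakes any staging waiter before it actually
runs __raid_run_ops(), so the next request can be staged while the
current one is still executing. The stripe reference taken with
atomic_inc(&sh->count) is dropped by release_stripe() in the worker.
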
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
        struct kmem_cache *sc;
-       int devs = conf->raid_disks;
+       int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        sprintf(conf->cache_name[0],
                "raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 
                nsh->raid_conf = conf;
                spin_lock_init(&nsh->lock);
+               #ifdef CONFIG_MULTICORE_RAID456
+               init_waitqueue_head(&nsh->ops.wait_for_ops);
+               #endif
 
                list_add(&nsh->lru, &newstripes);
        }
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
                case ALGORITHM_PARITY_N:
                        break;
                case ALGORITHM_ROTATING_N_CONTINUE:
+                       /* Like left_symmetric, but P is before Q */
                        if (sh->pd_idx == 0)
                                i--;    /* P D D D Q */
-                       else if (i > sh->pd_idx)
-                               i -= 2; /* D D Q P D */
+                       else {
+                               /* D D Q P D */
+                               if (i < sh->pd_idx)
+                                       i += raid_disks;
+                               i -= (sh->pd_idx + 1);
+                       }
                        break;
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *
  */
 
-static bool handle_stripe5(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks, i;
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh)
        ops_run_io(sh, &s);
 
        return_io(return_bi);
-
-       return blocked_rdev == NULL;
 }
 
-static bool handle_stripe6(struct stripe_head *sh)
+static void handle_stripe6(struct stripe_head *sh)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks;
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh)
        ops_run_io(sh, &s);
 
        return_io(return_bi);
-
-       return blocked_rdev == NULL;
 }
 
-/* returns true if the stripe was handled */
-static bool handle_stripe(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
 {
        if (sh->raid_conf->level == 6)
-               return handle_stripe6(sh);
+               handle_stripe6(sh);
        else
-               return handle_stripe5(sh);
+               handle_stripe5(sh);
 }
 
 static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev)
 {
        raid5_conf_t *conf = mddev->private;
        int i;
+       int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        rcu_read_lock();
-       for (i = 0; i < conf->raid_disks; i++) {
+       for (i = 0; i < devs; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
        clear_bit(STRIPE_INSYNC, &sh->state);
        spin_unlock(&sh->lock);
 
-       /* wait for any blocked device to be handled */
-       while (unlikely(!handle_stripe(sh)))
-               ;
+       handle_stripe(sh);
        release_stripe(sh);
 
        return STRIPE_SECTORS;
@@ -4349,37 +4385,6 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
        return handled;
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void __process_stripe(void *param, async_cookie_t cookie)
-{
-       struct stripe_head *sh = param;
-
-       handle_stripe(sh);
-       release_stripe(sh);
-}
-
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-       async_schedule_domain(__process_stripe, sh, domain);
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-       async_synchronize_full_domain(domain);
-}
-#else
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-       handle_stripe(sh);
-       release_stripe(sh);
-       cond_resched();
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-}
-#endif
-
 
 /*
  * This is our raid5 kernel thread.
@@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev)
        struct stripe_head *sh;
        raid5_conf_t *conf = mddev->private;
        int handled;
-       LIST_HEAD(raid_domain);
 
        pr_debug("+++ raid5d active\n");
 
@@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev)
                spin_unlock_irq(&conf->device_lock);
                
                handled++;
-               process_stripe(sh, &raid_domain);
+               handle_stripe(sh);
+               release_stripe(sh);
+               cond_resched();
 
                spin_lock_irq(&conf->device_lock);
        }
@@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev)
 
        spin_unlock_irq(&conf->device_lock);
 
-       synchronize_stripe_processing(&raid_domain);
        async_tx_issue_pending_all();
        unplug_slaves(mddev);
 
@@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 
        if (!sectors)
                sectors = mddev->dev_sectors;
-       if (!raid_disks) {
+       if (!raid_disks)
                /* size is defined by the smallest of previous and new size */
-               if (conf->raid_disks < conf->previous_raid_disks)
-                       raid_disks = conf->raid_disks;
-               else
-                       raid_disks = conf->previous_raid_disks;
-       }
+               raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
        sectors &= ~((sector_t)mddev->chunk_sectors - 1);
        sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
@@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
                        }
                        per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
                }
-               scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
+               scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
                if (!scribble) {
                        err = -ENOMEM;
                        break;
@@ -4686,7 +4687,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 static raid5_conf_t *setup_conf(mddev_t *mddev)
 {
        raid5_conf_t *conf;
-       int raid_disk, memory;
+       int raid_disk, memory, max_disks;
        mdk_rdev_t *rdev;
        struct disk_info *disk;
 
@@ -4722,15 +4723,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
        conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
        if (conf == NULL)
                goto abort;
+       spin_lock_init(&conf->device_lock);
+       init_waitqueue_head(&conf->wait_for_stripe);
+       init_waitqueue_head(&conf->wait_for_overlap);
+       INIT_LIST_HEAD(&conf->handle_list);
+       INIT_LIST_HEAD(&conf->hold_list);
+       INIT_LIST_HEAD(&conf->delayed_list);
+       INIT_LIST_HEAD(&conf->bitmap_list);
+       INIT_LIST_HEAD(&conf->inactive_list);
+       atomic_set(&conf->active_stripes, 0);
+       atomic_set(&conf->preread_active_stripes, 0);
+       atomic_set(&conf->active_aligned_reads, 0);
+       conf->bypass_threshold = BYPASS_THRESHOLD;
 
        conf->raid_disks = mddev->raid_disks;
-       conf->scribble_len = scribble_len(conf->raid_disks);
        if (mddev->reshape_position == MaxSector)
                conf->previous_raid_disks = mddev->raid_disks;
        else
                conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
+       max_disks = max(conf->raid_disks, conf->previous_raid_disks);
+       conf->scribble_len = scribble_len(max_disks);
 
-       conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
+       conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
                              GFP_KERNEL);
        if (!conf->disks)
                goto abort;
@@ -4744,24 +4758,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
        if (raid5_alloc_percpu(conf) != 0)
                goto abort;
 
-       spin_lock_init(&conf->device_lock);
-       init_waitqueue_head(&conf->wait_for_stripe);
-       init_waitqueue_head(&conf->wait_for_overlap);
-       INIT_LIST_HEAD(&conf->handle_list);
-       INIT_LIST_HEAD(&conf->hold_list);
-       INIT_LIST_HEAD(&conf->delayed_list);
-       INIT_LIST_HEAD(&conf->bitmap_list);
-       INIT_LIST_HEAD(&conf->inactive_list);
-       atomic_set(&conf->active_stripes, 0);
-       atomic_set(&conf->preread_active_stripes, 0);
-       atomic_set(&conf->active_aligned_reads, 0);
-       conf->bypass_threshold = BYPASS_THRESHOLD;
-
        pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                raid_disk = rdev->raid_disk;
-               if (raid_disk >= conf->raid_disks
+               if (raid_disk >= max_disks
                    || raid_disk < 0)
                        continue;
                disk = conf->disks + raid_disk;
@@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
        }
 
        memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
-                conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
+                max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
        if (grow_stripes(conf, conf->max_nr_stripes)) {
                printk(KERN_ERR
                        "raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev)
                    test_bit(In_sync, &rdev->flags))
                        working_disks++;
 
-       mddev->degraded = conf->raid_disks - working_disks;
+       mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
+                          - working_disks);
 
        if (mddev->degraded > conf->max_degraded) {
                printk(KERN_ERR "raid5: not enough operational devices for %s"
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2390e0e..dd70835 100644
@@ -214,12 +214,20 @@ struct stripe_head {
        int                     disks;          /* disks in stripe */
        enum check_states       check_state;
        enum reconstruct_states reconstruct_state;
-       /* stripe_operations
+       /**
+        * struct stripe_operations
         * @target - STRIPE_OP_COMPUTE_BLK target
+        * @target2 - 2nd compute target in the raid6 case
+        * @zero_sum_result - P and Q verification flags
+        * @request - async service request flags for raid_run_ops
         */
        struct stripe_operations {
                int                  target, target2;
                enum sum_check_flags zero_sum_result;
+               #ifdef CONFIG_MULTICORE_RAID456
+               unsigned long        request;
+               wait_queue_head_t    wait_for_ops;
+               #endif
        } ops;
        struct r5dev {
                struct bio      req;
@@ -294,6 +302,8 @@ struct r6_state {
 #define        STRIPE_FULL_WRITE       13 /* all blocks are set to be overwritten */
 #define        STRIPE_BIOFILL_RUN      14
 #define        STRIPE_COMPUTE_RUN      15
+#define        STRIPE_OPS_REQ_PENDING  16
+
 /*
  * Operation request flags
  */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
 {
        return (layout >= 0 && layout <= 5)
                ||
-               (layout == 8 || layout == 10)
+               (layout >= 8 && layout <= 10)
                ||
                (layout >= 16 && layout <= 20);
 }
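
Note: layouts 8, 9 and 10 are the DDF-compatible RAID6 layouts
(ALGORITHM_ROTATING_ZERO_RESTART, ALGORITHM_ROTATING_N_RESTART and
ALGORITHM_ROTATING_N_CONTINUE). The old check accepted only 8 and 10,
so ROTATING_N_RESTART arrays were wrongly rejected as invalid.
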
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 699dfee..2654d5c 100644
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  *
  * <benh> hpa: in process,
  * you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
index f9bf9cb..d1e276a 100644
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  */
 
 #include <linux/raid/pq.h>
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 58ffdf4..2874cbe 100644
@@ -7,7 +7,7 @@ CC       = gcc
 OPTFLAGS = -O2                 # Adjust as desired
 CFLAGS  = -I.. -I ../../../include -g $(OPTFLAGS)
 LD      = ld
-PERL    = perl
+AWK     = awk
 AR      = ar
 RANLIB  = ranlib
 
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
 raid6test: test.c raid6.a
        $(CC) $(CFLAGS) -o raid6test $^
 
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
-       $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
 
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
-       $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
 
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
-       $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
 
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
-       $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
 
-raid6int1.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
 
-raid6int2.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
 
-raid6int4.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
 
-raid6int8.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
 
-raid6int16.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
 
-raid6int32.c: raid6int.uc ../unroll.pl
-       $(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
 
 raid6tables.c: mktables
        ./mktables > raid6tables.c
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
new file mode 100644
index 0000000..c6aa036
--- /dev/null
+++ b/drivers/md/unroll.awk
@@ -0,0 +1,20 @@
+
+# This filter requires one command line option of form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
+
+BEGIN {
+       n = N + 0
+}
+{
+       if (/\$\$/) { rep = n } else { rep = 1 }
+       for (i = 0; i < rep; ++i) {
+               tmp = $0
+               gsub(/\$\$/, i, tmp)
+               gsub(/\$\#/, n, tmp)
+               gsub(/\$\*/, "$", tmp)
+               print tmp
+       }
+}
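
A quick example of the awk filter at work (run as awk -f unroll.awk -vN=2):
the input line

	w$$ = w$$ ^ p$$;

contains $$ and is therefore emitted once per value 0..N-1:

	w0 = w0 ^ p0;
	w1 = w1 ^ p1;

while $# would expand to 2 and $* to a literal $.
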
diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl
deleted file mode 100644
index 3acc710..0000000
--- a/drivers/md/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/perl
-#
-# Take a piece of C code and for each line which contains the sequence $$
-# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
-# by the unrolling factor, and $* with a single $
-#
-
-($n) = @ARGV;
-$n += 0;
-
-while ( defined($line = <STDIN>) ) {
-    if ( $line =~ /\$\$/ ) {
-       $rep = $n;
-    } else {
-       $rep = 1;
-    }
-    for ( $i = 0 ; $i < $rep ; $i++ ) {
-       $tmp = $line;
-       $tmp =~ s/\$\$/$i/g;
-       $tmp =~ s/\$\#/$n/g;
-       $tmp =~ s/\$\*/\$/g;
-       print $tmp;
-    }
-}
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index a4f68e5..b44462a 100644
@@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb;
 static int sclp_async_send_wait(char *message);
 static struct ctl_table_header *callhome_sysctl_header;
 static DEFINE_SPINLOCK(sclp_async_lock);
-static char nodename[64];
 #define SCLP_NORMAL_WRITE      0x00
 
 struct async_evbuf {
@@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = {
 static int call_home_on_panic(struct notifier_block *self,
                              unsigned long event, void *data)
 {
-               strncat(data, nodename, strlen(nodename));
-               sclp_async_send_wait(data);
-               return NOTIFY_DONE;
+       strncat(data, init_utsname()->nodename,
+               sizeof(init_utsname()->nodename));
+       sclp_async_send_wait(data);
+       return NOTIFY_DONE;
 }
 
 static struct notifier_block call_home_panic_nb = {
@@ -68,15 +68,14 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
 {
        unsigned long val;
        int len, rc;
-       char buf[2];
+       char buf[3];
 
-       if (!*count | (*ppos && !write)) {
+       if (!*count || (*ppos && !write)) {
                *count = 0;
                return 0;
        }
        if (!write) {
-               len =  sprintf(buf, "%d\n", callhome_enabled);
-               buf[len] = '\0';
+               len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
                rc = copy_to_user(buffer, buf, sizeof(buf));
                if (rc != 0)
                        return -EFAULT;
@@ -171,39 +170,29 @@ static int __init sclp_async_init(void)
        rc = sclp_register(&sclp_async_register);
        if (rc)
                return rc;
-       callhome_sysctl_header = register_sysctl_table(kern_dir_table);
-       if (!callhome_sysctl_header) {
-               rc = -ENOMEM;
-               goto out_sclp;
-       }
-       if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
-               rc = -EOPNOTSUPP;
+       rc = -EOPNOTSUPP;
+       if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
                goto out_sclp;
-       }
        rc = -ENOMEM;
+       callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+       if (!callhome_sysctl_header)
+               goto out_sclp;
        request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
-       if (!request)
-               goto out_sys;
        sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-       if (!sccb)
+       if (!request || !sccb)
                goto out_mem;
-       rc =  atomic_notifier_chain_register(&panic_notifier_list,
-                                            &call_home_panic_nb);
-       if (rc)
-               goto out_mem;
-
-       strncpy(nodename, init_utsname()->nodename, 64);
-       return 0;
-
+       rc = atomic_notifier_chain_register(&panic_notifier_list,
+                                           &call_home_panic_nb);
+       if (!rc)
+               goto out;
 out_mem:
        kfree(request);
        free_page((unsigned long) sccb);
-out_sys:
        unregister_sysctl_table(callhome_sysctl_header);
 out_sclp:
        sclp_unregister(&sclp_async_register);
+out:
        return rc;
-
 }
 module_init(sclp_async_init);
 
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 102000d..3012355 100644
@@ -158,7 +158,12 @@ static int smsg_pm_restore_thaw(struct device *dev)
                smsg_path->flags = 0;
                rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
                                       NULL, NULL, NULL);
-               printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc);
+#ifdef CONFIG_PM_DEBUG
+               if (rc)
+                       printk(KERN_ERR
+                              "iucv_path_connect returned with rc %i\n", rc);
+#endif
+               cpcmd("SET SMSG IUCV", NULL, 0, NULL);
        }
        return 0;
 }
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7d1aac3..4967643 100644
@@ -1919,7 +1919,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
                size = size>>16;
                size *= 4;
                if (size > MAX_MESSAGE_SIZE) {
-                       rcode = EINVAL;
+                       rcode = -EINVAL;
                        goto cleanup;
                }
                /* Copy in the user's I2O command */
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 9e41f91..3d4a0c8 100644
@@ -80,7 +80,7 @@ xfs_fs_set_xstate(
 
        if (sb->s_flags & MS_RDONLY)
                return -EROFS;
-       if (!XFS_IS_QUOTA_RUNNING(mp))
+       if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
                return -ENOSYS;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index ab64f3e..0785797 100644
@@ -880,6 +880,7 @@ nextag:
                                 * Not in range - save last search
                                 * location and allocate a new inode
                                 */
+                               xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
diff --git a/mm/nommu.c b/mm/nommu.c
index 5189b5a..9876fa0 100644
@@ -1362,9 +1362,11 @@ share:
 error_just_free:
        up_write(&nommu_region_sem);
 error:
-       fput(region->vm_file);
+       if (region->vm_file)
+               fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
-       fput(vma->vm_file);
+       if (vma->vm_file)
+               fput(vma->vm_file);
        if (vma->vm_flags & VM_EXECUTABLE)
                removed_exe_file_vma(vma->vm_mm);
        kmem_cache_free(vm_area_cachep, vma);