Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Oct 2009 22:35:16 +0000 (07:35 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Oct 2009 22:35:16 +0000 (07:35 +0900)
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  move virtrng_remove to .devexit.text
  move virtballoon_remove to .devexit.text
  virtio_blk: Revert serial number support
  virtio: let header files include virtio_ids.h
  virtio_blk: revert QUEUE_FLAG_VIRT addition

50 files changed:
Documentation/feature-removal-schedule.txt
arch/sh/Kconfig
arch/sh/boards/mach-landisk/gio.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/kvm/i8254.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
drivers/crypto/padlock-sha.c
drivers/input/input.c
drivers/input/keyboard/atkbd.c
drivers/input/misc/hp_sdc_rtc.c
drivers/input/mouse/logips2pp.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/md/dm-exception-store.c
drivers/md/dm-exception-store.h
drivers/md/dm-log-userspace-base.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/mmc/host/at91_mci.c
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_main.c
drivers/net/ethoc.c
drivers/net/fec.c
drivers/net/ks8851.c
drivers/net/ks8851.h
drivers/net/niu.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
fs/nfs/super.c
fs/notify/dnotify/dnotify.c
fs/notify/inode_mark.c
fs/notify/notification.c
fs/pipe.c
include/net/inet_timewait_sock.h
kernel/power/suspend_test.c
net/bluetooth/hci_sysfs.c
net/bluetooth/l2cap.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp.c
net/ipv4/tcp_minisocks.c
net/ipv6/ipv6_sockglue.c
net/unix/af_unix.c
virt/kvm/kvm_main.c

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 04e6c81..bc693ff 100644
@@ -418,6 +418,14 @@ When:      2.6.33
 Why:   Should be implemented in userspace, policy daemon.
 Who:   Johannes Berg <johannes@sipsolutions.net>
 
+---------------------------
+
+What:  CONFIG_INOTIFY
+When:  2.6.33
+Why:   last user (audit) will be converted to the newer more generic
+       and more easily maintained fsnotify subsystem
+Who:   Eric Paris <eparis@redhat.com>
+
 ----------------------------
 
 What:  lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b940424..0dc7e3c 100644
@@ -37,7 +37,6 @@ config SUPERH32
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       select HAVE_FTRACE_SYSCALLS
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_ARCH_KGDB
        select ARCH_HIBERNATION_POSSIBLE if MMU
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 25cdf73..5280131 100644
@@ -14,7 +14,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/kdev_t.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -35,7 +34,7 @@ static int gio_open(struct inode *inode, struct file *filp)
        int minor;
        int ret = -ENOENT;
 
-       lock_kernel();
+       preempt_disable();
        minor = MINOR(inode->i_rdev);
        if (minor < DEVCOUNT) {
                if (openCnt > 0) {
@@ -45,7 +44,7 @@ static int gio_open(struct inode *inode, struct file *filp)
                        ret = 0;
                }
        }
-       unlock_kernel();
+       preempt_enable();
        return ret;
 }
 
@@ -60,8 +59,7 @@ static int gio_close(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static int gio_ioctl(struct inode *inode, struct file *filp,
-                            unsigned int cmd, unsigned long arg)
+static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int data;
        static unsigned int addr = 0;
@@ -129,7 +127,7 @@ static const struct file_operations gio_fops = {
        .owner = THIS_MODULE,
        .open = gio_open,       /* open */
        .release = gio_close,   /* release */
-       .ioctl = gio_ioctl,     /* ioctl */
+       .unlocked_ioctl = gio_ioctl,
 };
 
 static int __init gio_init(void)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index a98c7d8..519e2d1 100644
@@ -26,7 +26,7 @@
 #define MAX_DCACHE_PAGES       64      /* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES       32
 
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset);
 
 /*
@@ -89,8 +89,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
        local_irq_restore(flags);
 }
 
-static inline void flush_cache_4096(unsigned long start,
-                                   unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
        unsigned long flags, exec_offset = 0;
 
@@ -103,8 +102,7 @@ static inline void flush_cache_4096(unsigned long start,
                exec_offset = 0x20000000;
 
        local_irq_save(flags);
-       __flush_cache_4096(start | SH_CACHE_ASSOC,
-                          P1SEGADDR(phys), exec_offset);
+       __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
        local_irq_restore(flags);
 }
 
@@ -129,8 +127,8 @@ static void sh4_flush_dcache_page(void *arg)
 
                /* Loop all the D-cache */
                n = boot_cpu_data.dcache.n_aliases;
-               for (i = 0; i < n; i++, addr += 4096)
-                       flush_cache_4096(addr, phys);
+               for (i = 0; i < n; i++, addr += PAGE_SIZE)
+                       flush_cache_one(addr, phys);
        }
 
        wmb();
@@ -318,11 +316,11 @@ static void sh4_flush_cache_page(void *args)
        /* We only need to flush D-cache when we have alias */
        if ((address^phys) & alias_mask) {
                /* Loop 4K of the D-cache */
-               flush_cache_4096(
+               flush_cache_one(
                        CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
                /* Loop another 4K of the D-cache */
-               flush_cache_4096(
+               flush_cache_one(
                        CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
                        phys);
        }
@@ -337,7 +335,7 @@ static void sh4_flush_cache_page(void *args)
                 * kernel has never executed the code through its identity
                 * translation.
                 */
-               flush_cache_4096(
+               flush_cache_one(
                        CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
        }
@@ -393,7 +391,7 @@ static void sh4_flush_cache_range(void *args)
 }
 
 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
  * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -406,7 +404,7 @@ static void sh4_flush_cache_range(void *args)
  * operation (purge/write-back) is selected by the lower 2 bits of
  * 'phys'.
  */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset)
 {
        int way_count;
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5e1091b..a2dc7f9 100644
@@ -265,6 +265,8 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
+       unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);
@@ -273,6 +275,13 @@ void __init cpu_cache_init(void)
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;
 
+       /*
+        * No flushing is necessary in the disabled cache case so we can
+        * just keep the noop functions in local_flush_..() and __flush_..()
+        */
+       if (unlikely(cache_disabled))
+               goto skip;
+
        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);
 
@@ -312,5 +321,6 @@ void __init cpu_cache_init(void)
                sh5_cache_init();
        }
 
+skip:
        emit_cache_params();
 }
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 585edeb..49c552c 100644
@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                return -EINVAL;
        }
 
-       if (irq_fpu_usable())
+       if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-       if (irq_fpu_usable())
+       if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-       if (irq_fpu_usable())
+       if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-       if (irq_fpu_usable()) {
+       if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-       if (irq_fpu_usable()) {
+       if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 82ad523..144e7f6 100644
@@ -116,7 +116,7 @@ static s64 __kpit_elapsed(struct kvm *kvm)
         * itself with the initial count and continues counting
         * from there.
         */
-       remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
+       remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
        elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
        elapsed = mod_64(elapsed, ps->pit_timer.period);
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 7024224..23c2176 100644
@@ -521,7 +521,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
        if (apic_get_reg(apic, APIC_TMICT) == 0)
                return 0;
 
-       remaining = hrtimer_expires_remaining(&apic->lapic_timer.timer);
+       remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
        if (ktime_to_ns(remaining) < 0)
                remaining = ktime_set(0, 0);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 685a4ff..818b92a 100644
@@ -748,7 +748,8 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        return write_protected;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+                          unsigned long data)
 {
        u64 *spte;
        int need_tlb_flush = 0;
@@ -763,7 +764,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
        return need_tlb_flush;
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+                            unsigned long data)
 {
        int need_flush = 0;
        u64 *spte, new_spte;
@@ -799,9 +801,10 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
        return 0;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+                         unsigned long data,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-                                        u64 data))
+                                        unsigned long data))
 {
        int i, j;
        int retval = 0;
@@ -846,10 +849,11 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-       kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
+       kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+                        unsigned long data)
 {
        u64 *spte;
        int young = 0;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 76cb6b3..0af8057 100644
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
        struct shash_desc fallback;
 };
@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
-       char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+       char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+               ((aligned(STACK_ALIGN)));
+       char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha1_state state;
        unsigned int space;
@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
-       char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+       char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+               ((aligned(STACK_ALIGN)));
+       char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha256_state state;
        unsigned int space;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c6f88eb..cc763c9 100644
@@ -782,10 +782,29 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
        return 0;
 }
 
+union input_seq_state {
+       struct {
+               unsigned short pos;
+               bool mutex_acquired;
+       };
+       void *p;
+};
+
 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       if (mutex_lock_interruptible(&input_mutex))
-               return NULL;
+       union input_seq_state *state = (union input_seq_state *)&seq->private;
+       int error;
+
+       /* We need to fit into seq->private pointer */
+       BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+       error = mutex_lock_interruptible(&input_mutex);
+       if (error) {
+               state->mutex_acquired = false;
+               return ERR_PTR(error);
+       }
+
+       state->mutex_acquired = true;
 
        return seq_list_start(&input_dev_list, *pos);
 }
@@ -795,9 +814,12 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        return seq_list_next(v, &input_dev_list, pos);
 }
 
-static void input_devices_seq_stop(struct seq_file *seq, void *v)
+static void input_seq_stop(struct seq_file *seq, void *v)
 {
-       mutex_unlock(&input_mutex);
+       union input_seq_state *state = (union input_seq_state *)&seq->private;
+
+       if (state->mutex_acquired)
+               mutex_unlock(&input_mutex);
 }
 
 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
@@ -861,7 +883,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
 static const struct seq_operations input_devices_seq_ops = {
        .start  = input_devices_seq_start,
        .next   = input_devices_seq_next,
-       .stop   = input_devices_seq_stop,
+       .stop   = input_seq_stop,
        .show   = input_devices_seq_show,
 };
 
@@ -881,40 +903,49 @@ static const struct file_operations input_devices_fileops = {
 
 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       if (mutex_lock_interruptible(&input_mutex))
-               return NULL;
+       union input_seq_state *state = (union input_seq_state *)&seq->private;
+       int error;
+
+       /* We need to fit into seq->private pointer */
+       BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+       error = mutex_lock_interruptible(&input_mutex);
+       if (error) {
+               state->mutex_acquired = false;
+               return ERR_PTR(error);
+       }
+
+       state->mutex_acquired = true;
+       state->pos = *pos;
 
-       seq->private = (void *)(unsigned long)*pos;
        return seq_list_start(&input_handler_list, *pos);
 }
 
 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       seq->private = (void *)(unsigned long)(*pos + 1);
-       return seq_list_next(v, &input_handler_list, pos);
-}
+       union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-static void input_handlers_seq_stop(struct seq_file *seq, void *v)
-{
-       mutex_unlock(&input_mutex);
+       state->pos = *pos + 1;
+       return seq_list_next(v, &input_handler_list, pos);
 }
 
 static int input_handlers_seq_show(struct seq_file *seq, void *v)
 {
        struct input_handler *handler = container_of(v, struct input_handler, node);
+       union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-       seq_printf(seq, "N: Number=%ld Name=%s",
-                  (unsigned long)seq->private, handler->name);
+       seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
        if (handler->fops)
                seq_printf(seq, " Minor=%d", handler->minor);
        seq_putc(seq, '\n');
 
        return 0;
 }
+
 static const struct seq_operations input_handlers_seq_ops = {
        .start  = input_handlers_seq_start,
        .next   = input_handlers_seq_next,
-       .stop   = input_handlers_seq_stop,
+       .stop   = input_seq_stop,
        .show   = input_handlers_seq_show,
 };
 
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4709e15..a651237 100644
@@ -574,11 +574,22 @@ static void atkbd_event_work(struct work_struct *work)
 
        mutex_lock(&atkbd->event_mutex);
 
-       if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
-               atkbd_set_leds(atkbd);
+       if (!atkbd->enabled) {
+               /*
+                * Serio ports are resumed asynchronously so while driver core
+                * thinks that device is already fully operational in reality
+                * it may not be ready yet. In this case we need to keep
+                * rescheduling till reconnect completes.
+                */
+               schedule_delayed_work(&atkbd->event_work,
+                                       msecs_to_jiffies(100));
+       } else {
+               if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
+                       atkbd_set_leds(atkbd);
 
-       if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
-               atkbd_set_repeat_rate(atkbd);
+               if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
+                       atkbd_set_repeat_rate(atkbd);
+       }
 
        mutex_unlock(&atkbd->event_mutex);
 }
@@ -770,6 +781,30 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
        return 3;
 }
 
+static int atkbd_reset_state(struct atkbd *atkbd)
+{
+        struct ps2dev *ps2dev = &atkbd->ps2dev;
+       unsigned char param[1];
+
+/*
+ * Set the LEDs to a predefined state (all off).
+ */
+
+       param[0] = 0;
+       if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+               return -1;
+
+/*
+ * Set autorepeat to fastest possible.
+ */
+
+       param[0] = 0;
+       if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP))
+               return -1;
+
+       return 0;
+}
+
 static int atkbd_activate(struct atkbd *atkbd)
 {
        struct ps2dev *ps2dev = &atkbd->ps2dev;
@@ -852,29 +887,6 @@ static unsigned int atkbd_hp_forced_release_keys[] = {
 };
 
 /*
- * Inventec system with broken key release on volume keys
- */
-static unsigned int atkbd_inventec_forced_release_keys[] = {
-       0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
- * for its volume buttons
- */
-static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
-       0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
- * release for their volume buttons
- */
-static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
-       0xae, 0xb0, -1U
-};
-
-/*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */
 static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -882,14 +894,6 @@ static unsigned int atkbd_samsung_forced_release_keys[] = {
 };
 
 /*
- * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
- * do not generate release events so we have to do it ourselves.
- */
-static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
-       0xb0, 0xae, -1U
-};
-
-/*
  * Amilo Pi 3525 key release for Fn+Volume keys not working
  */
 static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
@@ -911,6 +915,14 @@ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
 };
 
 /*
+ * Many notebooks don't send key release event for volume up/down
+ * keys, with key list below common among them
+ */
+static unsigned int atkbd_volume_forced_release_keys[] = {
+       0xae, 0xb0, -1U
+};
+
+/*
  * atkbd_set_keycode_table() initializes keyboard's keycode table
  * according to the selected scancode set
  */
@@ -1087,6 +1099,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
                }
 
                atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
+               atkbd_reset_state(atkbd);
                atkbd_activate(atkbd);
 
        } else {
@@ -1267,6 +1280,7 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
 
                atkbd->dev = new_dev;
                atkbd->set = atkbd_select_set(atkbd, atkbd->set, value);
+               atkbd_reset_state(atkbd);
                atkbd_activate(atkbd);
                atkbd_set_keycode_table(atkbd);
                atkbd_set_device_attrs(atkbd);
@@ -1548,7 +1562,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_hp_zv6100_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "HP Presario R4000",
@@ -1557,7 +1571,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_hp_r4000_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "HP Presario R4100",
@@ -1566,7 +1580,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_hp_r4000_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "HP Presario R4200",
@@ -1575,7 +1589,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_hp_r4000_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "Inventec Symphony",
@@ -1584,7 +1598,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_inventec_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "Samsung NC10",
@@ -1620,7 +1634,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
                },
                .callback = atkbd_setup_forced_release,
-               .driver_data = atkbd_amilo_pa1510_forced_release_keys,
+               .driver_data = atkbd_volume_forced_release_keys,
        },
        {
                .ident = "Fujitsu Amilo Pi 3525",
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 216a559..ea821b5 100644
@@ -209,7 +209,7 @@ static inline int hp_sdc_rtc_read_rt(struct timeval *res) {
 
 /* Read the i8042 fast handshake timer */
 static inline int hp_sdc_rtc_read_fhs(struct timeval *res) {
-       uint64_t raw;
+       int64_t raw;
        unsigned int tenms;
 
        raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2);
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index de745d7..ab5dc5f 100644
@@ -219,7 +219,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
                                PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
                                PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
                { 72,   PS2PP_KIND_TRACKMAN,    0 },                    /* T-CH11: TrackMan Marble */
-               { 73,   0,                      PS2PP_SIDE_BTN },
+               { 73,   PS2PP_KIND_TRACKMAN,    PS2PP_SIDE_BTN },       /* TrackMan FX */
                { 75,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
                { 76,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
                { 79,   PS2PP_KIND_TRACKMAN,    PS2PP_WHEEL },          /* TrackMan with wheel */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b66ff1a..f4a6125 100644
@@ -652,6 +652,16 @@ static const struct dmi_system_id toshiba_dmi_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
                },
+
+       },
+       {
+               .ident = "Toshiba Portege M300",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
+               },
+
        },
        { }
 };
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a39bc4e..a537925 100644
@@ -327,6 +327,17 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
                },
        },
        {
+               /*
+                * Reset and GET ID commands issued via KBD port are
+                * sometimes being delivered to AUX3.
+                */
+               .ident = "Sony Vaio FZ-240E",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+               },
+       },
+       {
                .ident = "Amoi M636/A737",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
@@ -661,7 +672,7 @@ static void i8042_pnp_exit(void)
 static int __init i8042_pnp_init(void)
 {
        char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
-       int pnp_data_busted = false;
+       bool pnp_data_busted = false;
        int err;
 
 #ifdef CONFIG_X86
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 556acff..7dbe652 100644
@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary.  size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-       size--;
-       return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
                          const char *chunk_size_arg, char **error)
 {
@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
        char *value;
 
        chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-       if (*chunk_size_arg == '\0' || *value != '\0') {
+       if (*chunk_size_arg == '\0' || *value != '\0' ||
+           chunk_size_ulong > UINT_MAX) {
                *error = "Invalid chunk size";
                return -EINVAL;
        }
@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
                return 0;
        }
 
-       /*
-        * Chunk size must be multiple of page size.  Silently
-        * round up if it's not.
-        */
-       chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-       return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+       return dm_exception_store_set_chunk_size(store,
+                                                (unsigned) chunk_size_ulong,
                                                 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-                                     unsigned long chunk_size_ulong,
+                                     unsigned chunk_size,
                                      char **error)
 {
        /* Check chunk_size is a power of 2 */
-       if (!is_power_of_2(chunk_size_ulong)) {
+       if (!is_power_of_2(chunk_size)) {
                *error = "Chunk size is not a power of 2";
                return -EINVAL;
        }
 
        /* Validate the chunk size against the device block size */
-       if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+       if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
                *error = "Chunk size is not a multiple of device blocksize";
                return -EINVAL;
        }
 
-       if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+       if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
                *error = "Chunk size is too high";
                return -EINVAL;
        }
 
-       store->chunk_size = chunk_size_ulong;
-       store->chunk_mask = chunk_size_ulong - 1;
-       store->chunk_shift = ffs(chunk_size_ulong) - 1;
+       store->chunk_size = chunk_size;
+       store->chunk_mask = chunk_size - 1;
+       store->chunk_shift = ffs(chunk_size) - 1;
 
        return 0;
 }
@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 
        r = set_chunk_size(tmp_store, argv[2], &ti->error);
        if (r)
-               goto bad_cow;
+               goto bad_ctr;
 
        r = type->ctr(tmp_store, 0, NULL);
        if (r) {
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 812c718..8a223a4 100644
@@ -101,9 +101,9 @@ struct dm_exception_store {
        struct dm_dev *cow;
 
        /* Size of data blocks saved - must be a power of 2 */
-       chunk_t chunk_size;
-       chunk_t chunk_mask;
-       chunk_t chunk_shift;
+       unsigned chunk_size;
+       unsigned chunk_mask;
+       unsigned chunk_shift;
 
        void *context;
 };
@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-                                     unsigned long chunk_size_ulong,
+                                     unsigned chunk_size,
                                      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 652bd33..7ac2c14 100644
@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
        }
 
        /* The ptr value is sufficient for local unique id */
-       lc->luid = (uint64_t)lc;
+       lc->luid = (unsigned long)lc;
 
        lc->ti = ti;
 
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d5b2e08..0c74642 100644
@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 {
        int r;
        struct disk_header *dh;
-       chunk_t chunk_size;
+       unsigned chunk_size;
        int chunk_size_supplied = 1;
        char *chunk_err;
 
        /*
-        * Use default chunk size (or hardsect_size, if larger) if none supplied
+        * Use default chunk size (or logical_block_size, if larger)
+        * if none supplied
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
                return 0;
 
        if (chunk_size_supplied)
-               DMWARN("chunk size %llu in device metadata overrides "
-                      "table chunk size of %llu.",
-                      (unsigned long long)chunk_size,
-                      (unsigned long long)ps->store->chunk_size);
+               DMWARN("chunk size %u in device metadata overrides "
+                      "table chunk size of %u.",
+                      chunk_size, ps->store->chunk_size);
 
        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);
@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                                              &chunk_err);
        if (r) {
-               DMERR("invalid on-disk chunk size %llu: %s.",
-                     (unsigned long long)chunk_size, chunk_err);
+               DMERR("invalid on-disk chunk size %u: %s.",
+                     chunk_size, chunk_err);
                return r;
        }
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 57f1bf7..3a3ba46 100644
@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+       struct dm_snapshot *l;
        struct origin *o, *new_o;
        struct block_device *bdev = snap->origin->bdev;
 
@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
                __insert_origin(o);
        }
 
-       list_add_tail(&snap->list, &o->snapshots);
+       /* Sort the list according to chunk size, largest-first smallest-last */
+       list_for_each_entry(l, &o->snapshots, list)
+               if (l->store->chunk_size < snap->store->chunk_size)
+                       break;
+       list_add_tail(&snap->list, &l->list);
 
        up_write(&_origins_lock);
        return 0;
@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+       if (!s->store->chunk_size) {
+               ti->error = "Chunk size not set";
+               goto bad_load_and_register;
+       }
+
        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        if (register_snapshot(s)) {
@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 
        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-       src.count = min(s->store->chunk_size, dev_size - src.sector);
+       src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
        dest.bdev = s->store->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;
 
+       down_write(&snap->lock);
+
        switch (type) {
        case STATUSTYPE_INFO:
                if (!snap->valid)
@@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
                break;
        }
 
+       up_write(&snap->lock);
+
        return 0;
 }
 
@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
        struct dm_dev *dev = ti->private;
        struct dm_snapshot *snap;
        struct origin *o;
-       chunk_t chunk_size = 0;
+       unsigned chunk_size = 0;
 
        down_read(&_origins_lock);
        o = __lookup_origin(dev->bdev);
@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
        r = dm_register_target(&snapshot_target);
        if (r) {
                DMERR("snapshot target register failed %d", r);
-               return r;
+               goto bad_register_snapshot_target;
        }
 
        r = dm_register_target(&origin_target);
@@ -1522,6 +1536,9 @@ bad2:
        dm_unregister_target(&origin_target);
 bad1:
        dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+       dm_exception_store_exit();
        return r;
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 376f1ab..724efc6 100644
@@ -47,6 +47,7 @@ struct dm_io {
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
+       spinlock_t endio_lock;
 };
 
 /*
@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
        struct mapped_device *md = io->md;
 
        /* Push-back supersedes any I/O errors */
-       if (error && !(io->error > 0 && __noflush_suspending(md)))
-               io->error = error;
+       if (unlikely(error)) {
+               spin_lock_irqsave(&io->endio_lock, flags);
+               if (!(io->error > 0 && __noflush_suspending(md)))
+                       io->error = error;
+               spin_unlock_irqrestore(&io->endio_lock, flags);
+       }
 
        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
+       spin_lock_init(&ci.io->endio_lock);
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        if (unlikely(bio_empty_barrier(bio)))
@@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
        destroy_workqueue(md->wq);
 bad_thread:
+       del_gendisk(md->disk);
        put_disk(md->disk);
 bad_disk:
        blk_cleanup_queue(md->queue);
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index e556d42..63924e0 100644
@@ -72,7 +72,6 @@
 #include <asm/irq.h>
 #include <asm/gpio.h>
 
-#include <asm/mach/mmc.h>
 #include <mach/board.h>
 #include <mach/cpu.h>
 #include <mach/at91_mci.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 89876ad..28a0eda 100644
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-       u16 stage, error;
+       u16 stage;
+       int status, timeout = 0;
 
-       error = be_POST_stage_get(adapter, &stage);
-       if (error || stage != POST_STAGE_ARMFW_RDY) {
-               dev_err(&adapter->pdev->dev, "POST failed.\n");
-               return -1;
-       }
+       do {
+               status = be_POST_stage_get(adapter, &stage);
+               if (status) {
+                       dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+                               stage);
+                       return -1;
+               } else if (stage != POST_STAGE_ARMFW_RDY) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule_timeout(2 * HZ);
+                       timeout += 2;
+               } else {
+                       return 0;
+               }
+       } while (timeout < 20);
 
-       return 0;
+       dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+       return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-               bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+               u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-       req->capability_flags = cpu_to_le32(flags);
-       req->enable_flags = cpu_to_le32(flags);
+       req->capability_flags = cpu_to_le32(cap_flags);
+       req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a86f917..4995378 100644
@@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-                       bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+                       u32 en_flags, u8 *mac, bool pmac_invalid,
+                       u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 6d5e81f..1f941f0 100644
@@ -1620,19 +1620,22 @@ static int be_open(struct net_device *netdev)
 static int be_setup(struct be_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       u32 if_flags;
+       u32 cap_flags, en_flags;
        int status;
 
-       if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-               BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-               BE_IF_FLAGS_PASS_L3L4_ERRORS;
-       status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-                       false/* pmac_invalid */, &adapter->if_handle,
-                       &adapter->pmac_id);
+       cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                       BE_IF_FLAGS_MCAST_PROMISCUOUS |
+                       BE_IF_FLAGS_PROMISCUOUS |
+                       BE_IF_FLAGS_PASS_L3L4_ERRORS;
+       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                       BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+       status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                       netdev->dev_addr, false/* pmac_invalid */,
+                       &adapter->if_handle, &adapter->pmac_id);
        if (status != 0)
                goto do_none;
 
-
        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;
@@ -2055,6 +2058,10 @@ static int be_hw_up(struct be_adapter *adapter)
        if (status)
                return status;
 
+       status = be_cmd_reset_function(adapter);
+       if (status)
+               return status;
+
        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;
@@ -2108,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
        if (status)
                goto free_netdev;
 
-       status = be_cmd_reset_function(adapter);
-       if (status)
-               goto ctrl_clean;
-
        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 9c950bb..f7d9ac8 100644
@@ -223,24 +223,25 @@ struct ethoc_bd {
        u32 addr;
 };
 
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
        return ioread32(dev->iobase + offset);
 }
 
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
        iowrite32(data, dev->iobase + offset);
 }
 
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+               struct ethoc_bd *bd)
 {
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        bd->stat = ethoc_read(dev, offset + 0);
        bd->addr = ethoc_read(dev, offset + 4);
 }
 
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
                const struct ethoc_bd *bd)
 {
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -248,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
        ethoc_write(dev, offset + 4, bd->addr);
 }
 
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
        u32 imask = ethoc_read(dev, INT_MASK);
        imask |= mask;
        ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
        u32 imask = ethoc_read(dev, INT_MASK);
        imask &= ~mask;
        ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
        ethoc_write(dev, INT_SOURCE, mask);
 }
 
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
        u32 mode = ethoc_read(dev, MODER);
        mode |= MODER_RXEN | MODER_TXEN;
        ethoc_write(dev, MODER, mode);
 }
 
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
        u32 mode = ethoc_read(dev, MODER);
        mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -508,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
                return IRQ_NONE;
        }
 
-       ethoc_ack_irq(priv, INT_MASK_ALL);
+       ethoc_ack_irq(priv, pending);
 
        if (pending & INT_MASK_BUSY) {
                dev_err(&dev->dev, "packet dropped\n");
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2923438..16a1d58 100644
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
   *
   * index is only used in legacy code
   */
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
 {
        struct fec_enet_private *fep = netdev_priv(dev);
        struct bufdesc *cbd_base;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 2378358..a23f739 100644
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
 }
 
 /**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+       struct spi_transfer *xfer = &ks->spi_xfer1;
+       struct spi_message *msg = &ks->spi_msg1;
+       __le16 txb[2];
+       int ret;
+       int bit;
+
+       bit = 1 << (reg & 3);
+
+       txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+       txb[1] = val;
+
+       xfer->tx_buf = txb;
+       xfer->rx_buf = NULL;
+       xfer->len = 3;
+
+       ret = spi_sync(ks->spidev, msg);
+       if (ret < 0)
+               ks_err(ks, "spi_sync() failed\n");
+}
+
+/**
  * ks8851_rx_1msg - select whether to use one or two messages for spi read
  * @ks: The device structure
  *
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
 static int ks8851_write_mac_addr(struct net_device *dev)
 {
        struct ks8851_net *ks = netdev_priv(dev);
-       u16 *mcp = (u16 *)dev->dev_addr;
+       int i;
 
        mutex_lock(&ks->lock);
 
-       ks8851_wrreg16(ks, KS_MARL, mcp[0]);
-       ks8851_wrreg16(ks, KS_MARM, mcp[1]);
-       ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+       for (i = 0; i < ETH_ALEN; i++)
+               ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
 
        mutex_unlock(&ks->lock);
 
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
                        mcptr = mcptr->next;
                }
 
-               rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+               rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
        } else {
                /* just accept broadcast / unicast */
                rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
        ndev->netdev_ops = &ks8851_netdev_ops;
        ndev->irq = spi->irq;
 
+       /* issue a global soft reset to reset the device. */
+       ks8851_soft_reset(ks, GRR_GSR);
+
        /* simple check for a valid chip being connected to the bus */
 
        if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe14..f52c312 100644
@@ -16,6 +16,7 @@
 #define CCR_32PIN                              (1 << 0)
 
 /* MAC address registers */
+#define KS_MAR(_m)                             0x15 - (_m)
 #define KS_MARL                                        0x10
 #define KS_MARM                                        0x12
 #define KS_MARH                                        0x14
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index f9364d0..d6c7ac6 100644
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
        rp->rcr_index = index;
 
        skb_reserve(skb, NET_IP_ALIGN);
-       __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+       __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
 
        rp->rx_packets++;
        rp->rx_bytes += skb->len;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 50ac94c..3709d6a 100644
@@ -453,7 +453,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        }
        return tot_sgs;
 }
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6a16f76..004353a 100644
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
        }
        rq->uncommitted[ring_idx] += num_allocated;
 
-       dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+       dev_dbg(&adapter->netdev->dev,
+               "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
                "%u, uncommited %u\n", num_allocated, ring->next2fill,
                ring->next2comp, rq->uncommitted[ring_idx]);
 
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_NONE;
 
-               dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+               dev_dbg(&adapter->netdev->dev,
+                       "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
                        ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                gdesc->dword[2] = dw2 | buf_size;
                gdesc->dword[3] = 0;
 
-               dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+               dev_dbg(&adapter->netdev->dev,
+                       "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, gdesc->txd.addr,
                        gdesc->dword[2], gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                gdesc->dword[2] = dw2 | frag->size;
                gdesc->dword[3] = 0;
 
-               dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+               dev_dbg(&adapter->netdev->dev,
+                       "txd[%u]: 0x%llu %u %u\n",
                        tq->tx_ring.next2fill, gdesc->txd.addr,
                        gdesc->dword[2], gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
        tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
        memcpy(tdd->data, skb->data, ctx->copy_size);
-       dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+       dev_dbg(&adapter->netdev->dev,
+               "copy %u bytes to dataRing[%u]\n",
                ctx->copy_size, tq->tx_ring.next2fill);
        return 1;
 
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
        if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                tq->stats.tx_ring_full++;
-               dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+               dev_dbg(&adapter->netdev->dev,
+                       "tx queue stopped on %s, next2comp %u"
                        " next2fill %u\n", adapter->netdev->name,
                        tq->tx_ring.next2comp, tq->tx_ring.next2fill);
 
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
        /* finally flips the GEN bit of the SOP desc */
        gdesc->dword[2] ^= VMXNET3_TXD_GEN;
-       dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+       dev_dbg(&adapter->netdev->dev,
+               "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
                tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
                gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                        if (unlikely(rcd->len == 0)) {
                                /* Pretend the rx buffer is skipped. */
                                BUG_ON(!(rcd->sop && rcd->eop));
-                               dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+                               dev_dbg(&adapter->netdev->dev,
+                                       "rxRing[%u][%u] 0 length\n",
                                        ring_idx, idx);
                                goto rcd_done;
                        }
@@ -1683,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
        int err;
        u32 ret;
 
-       dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+       dev_dbg(&adapter->netdev->dev,
+               "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
                " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
                adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
                adapter->rx_queue.rx_ring[0].size,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb9157..3c0d70d 100644
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
@@ -59,7 +60,6 @@
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
-#include <linux/dst.h>
 
 #include "vmxnet3_defs.h"
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2c18ac..90be551 100644
@@ -1253,6 +1253,7 @@ static int nfs_parse_mount_options(char *raw,
                        default:
                                dfprintk(MOUNT, "NFS:   unrecognized "
                                                "transport protocol\n");
+                               kfree(string);
                                return 0;
                        }
                        break;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 828a889..7e54e52 100644
@@ -91,6 +91,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
        struct dnotify_struct *dn;
        struct dnotify_struct **prev;
        struct fown_struct *fown;
+       __u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
 
        to_tell = event->to_tell;
 
@@ -106,7 +107,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
        spin_lock(&entry->lock);
        prev = &dnentry->dn;
        while ((dn = *prev) != NULL) {
-               if ((dn->dn_mask & event->mask) == 0) {
+               if ((dn->dn_mask & test_mask) == 0) {
                        prev = &dn->dn_next;
                        continue;
                }
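
Note: events delivered to a watched directory on behalf of a child carry the FS_EVENT_ON_CHILD flag, and dnotify marks keep that bit set so they receive child events at all. Comparing the raw masks therefore matches on that shared flag alone and can signal a watcher for event types it never asked for; masking the flag out of the event first restores the intended test. For instance (flag combinations illustrative):

        /* watcher registered only for deletes */
        dn_mask    = FS_DELETE | FS_EVENT_ON_CHILD;
        event_mask = FS_MODIFY | FS_EVENT_ON_CHILD;    /* a child was written */

        /* old: dn_mask & event_mask != 0                        -> spurious SIGIO
         * new: dn_mask & (event_mask & ~FS_EVENT_ON_CHILD) == 0 -> skipped      */
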
index c8a07c6..3165d85 100644 (file)
@@ -324,11 +324,11 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
        spin_lock(&group->mark_lock);
        spin_lock(&inode->i_lock);
 
-       entry->group = group;
-       entry->inode = inode;
-
        lentry = fsnotify_find_mark_entry(group, inode);
        if (!lentry) {
+               entry->group = group;
+               entry->inode = inode;
+
                hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
                list_add(&entry->g_list, &group->mark_entries);
 
index 3816d57..b8bf53b 100644 (file)
@@ -143,7 +143,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
                        /* remember, after old was put on the wait_q we aren't
                         * allowed to look at the inode any more, only thing
                         * left to check was if the file_name is the same */
-                       if (old->name_len &&
+                       if (!old->name_len ||
                            !strcmp(old->file_name, new->file_name))
                                return true;
                        break;
index 52c4151..ae17d02 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -777,36 +777,55 @@ pipe_rdwr_release(struct inode *inode, struct file *filp)
 static int
 pipe_read_open(struct inode *inode, struct file *filp)
 {
-       /* We could have perhaps used atomic_t, but this and friends
-          below are the only places.  So it doesn't seem worthwhile.  */
+       int ret = -ENOENT;
+
        mutex_lock(&inode->i_mutex);
-       inode->i_pipe->readers++;
+
+       if (inode->i_pipe) {
+               ret = 0;
+               inode->i_pipe->readers++;
+       }
+
        mutex_unlock(&inode->i_mutex);
 
-       return 0;
+       return ret;
 }
 
 static int
 pipe_write_open(struct inode *inode, struct file *filp)
 {
+       int ret = -ENOENT;
+
        mutex_lock(&inode->i_mutex);
-       inode->i_pipe->writers++;
+
+       if (inode->i_pipe) {
+               ret = 0;
+               inode->i_pipe->writers++;
+       }
+
        mutex_unlock(&inode->i_mutex);
 
-       return 0;
+       return ret;
 }
 
 static int
 pipe_rdwr_open(struct inode *inode, struct file *filp)
 {
+       int ret = -ENOENT;
+
        mutex_lock(&inode->i_mutex);
-       if (filp->f_mode & FMODE_READ)
-               inode->i_pipe->readers++;
-       if (filp->f_mode & FMODE_WRITE)
-               inode->i_pipe->writers++;
+
+       if (inode->i_pipe) {
+               ret = 0;
+               if (filp->f_mode & FMODE_READ)
+                       inode->i_pipe->readers++;
+               if (filp->f_mode & FMODE_WRITE)
+                       inode->i_pipe->writers++;
+       }
+
        mutex_unlock(&inode->i_mutex);
 
-       return 0;
+       return ret;
 }
 
 /*
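
Note: the fs/pipe.c hunks close a race between open() and the final release of a pipe inode. The open paths used to bump the reader/writer counts unconditionally, but by the time i_mutex is taken the last close may already have torn down inode->i_pipe, so the increment dereferenced a freed or NULL pointer. The fix re-checks i_pipe under the mutex and fails the open with -ENOENT, as in this condensed sketch of the guarded-open pattern:

        mutex_lock(&inode->i_mutex);
        if (inode->i_pipe) {            /* still alive? */
                inode->i_pipe->readers++;
                ret = 0;
        } else {
                ret = -ENOENT;          /* lost the race with the last close */
        }
        mutex_unlock(&inode->i_mutex);
        return ret;
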
index b63b80f..f93ad90 100644 (file)
@@ -130,11 +130,11 @@ struct inet_timewait_sock {
        __u16                   tw_num;
        kmemcheck_bitfield_begin(flags);
        /* And these are ours. */
-       __u8                    tw_ipv6only:1,
-                               tw_transparent:1;
-       /* 14 bits hole, try to pack */
+       unsigned int            tw_ipv6only     : 1,
+                               tw_transparent  : 1,
+                               tw_pad          : 14,   /* 14 bits hole */
+                               tw_ipv6_offset  : 16;
        kmemcheck_bitfield_end(flags);
-       __u16                   tw_ipv6_offset;
        unsigned long           tw_ttd;
        struct inet_bind_bucket *tw_tb;
        struct hlist_node       tw_death_node;
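
Note: the inet_timewait_sock change concerns the kmemcheck annotation: the bit tracking only covers what sits between kmemcheck_bitfield_begin() and kmemcheck_bitfield_end(), so the 14-bit hole is made explicit and the separate __u16 tw_ipv6_offset is folded into the same 32-bit bitfield. A stripped-down sketch of the layout rule (field names hypothetical):

        #include <linux/kmemcheck.h>

        struct example_flags {
                kmemcheck_bitfield_begin(flags);
                unsigned int    a   : 1,
                                b   : 1,
                                pad : 14,       /* make the hole explicit */
                                off : 16;       /* keep it inside the annotation */
                kmemcheck_bitfield_end(flags);
        };
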
index 17d8bb1..25596e4 100644 (file)
@@ -19,7 +19,7 @@
  * The time it takes is system-specific though, so when we test this
  * during system bootup we allow a LOT of time.
  */
-#define TEST_SUSPEND_SECONDS   5
+#define TEST_SUSPEND_SECONDS   10
 
 static unsigned long suspend_test_start_time;
 
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
         * has some performance issues.  The stack dump of a WARN_ON
         * is more likely to get the right attention than a printk...
         */
-       WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+       WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
+            "Component: %s, time: %u\n", label, msec);
 }
 
 /*
index 7f939ce..2bc6f6a 100644 (file)
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
 
        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
+       dev_set_drvdata(&conn->dev, conn);
+
        if (device_add(&conn->dev) < 0) {
                BT_ERR("Failed to register connection device");
                return;
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
        conn->dev.class = bt_class;
        conn->dev.parent = &hdev->dev;
 
-       dev_set_drvdata(&conn->dev, conn);
-
        device_initialize(&conn->dev);
 
        INIT_WORK(&conn->work_add, add_conn);
index 555d9da..77e9fb1 100644 (file)
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
        conn->feat_mask = 0;
 
-       setup_timer(&conn->info_timer, l2cap_info_timeout,
-                                               (unsigned long) conn);
-
        spin_lock_init(&conn->lock);
        rwlock_init(&conn->chan_list.lock);
 
+       setup_timer(&conn->info_timer, l2cap_info_timeout,
+                                               (unsigned long) conn);
+
        conn->disc_reason = 0x13;
 
        return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
        /* Default config options */
        pi->conf_len = 0;
        pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+       skb_queue_head_init(TX_QUEUE(sk));
+       skb_queue_head_init(SREJ_QUEUE(sk));
+       INIT_LIST_HEAD(SREJ_LIST(sk));
 }
 
 static struct proto l2cap_proto = {
index 4351ca2..537731b 100644 (file)
@@ -446,6 +446,28 @@ extern int sysctl_tcp_synack_retries;
 
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+                                 const int max_retries,
+                                 const u8 rskq_defer_accept,
+                                 int *expire, int *resend)
+{
+       if (!rskq_defer_accept) {
+               *expire = req->retrans >= thresh;
+               *resend = 1;
+               return;
+       }
+       *expire = req->retrans >= thresh &&
+                 (!inet_rsk(req)->acked || req->retrans >= max_retries);
+       /*
+        * Do not resend while waiting for data after ACK,
+        * start to resend on end of deferring period to give
+        * last chance for data or ACK to create established socket.
+        */
+       *resend = !inet_rsk(req)->acked ||
+                 req->retrans >= rskq_defer_accept - 1;
+}
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
                                const unsigned long interval,
                                const unsigned long timeout,
@@ -501,9 +523,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
                reqp=&lopt->syn_table[i];
                while ((req = *reqp) != NULL) {
                        if (time_after_eq(now, req->expires)) {
-                               if ((req->retrans < thresh ||
-                                    (inet_rsk(req)->acked && req->retrans < max_retries))
-                                   && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+                               int expire = 0, resend = 0;
+
+                               syn_ack_recalc(req, thresh, max_retries,
+                                              queue->rskq_defer_accept,
+                                              &expire, &resend);
+                               if (!expire &&
+                                   (!resend ||
+                                    !req->rsk_ops->rtx_syn_ack(parent, req) ||
+                                    inet_rsk(req)->acked)) {
                                        unsigned long timeo;
 
                                        if (req->retrans++ == 0)
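
Note: the new syn_ack_recalc() helper separates the two decisions the pruning loop used to conflate: whether a pending request should be dropped, and whether its SYN-ACK should be retransmitted. Without TCP_DEFER_ACCEPT the old behaviour is kept (expire purely on the retransmit threshold, always resend); with it, a request that has already seen the client's bare ACK is neither resent to nor expired until the deferring period runs out, giving data (or a further ACK) a last chance to create the established socket. Usage sketch with illustrative numbers:

        int expire, resend;

        /* listener with TCP_DEFER_ACCEPT worth 3 retransmits, thresh == max == 5 */
        syn_ack_recalc(req, 5, 5, 3, &expire, &resend);
        /* if the client ACKed but sent no data (inet_rsk(req)->acked == 1):
         *   - expire only once req->retrans reaches 5
         *   - resend only once req->retrans >= 2 (end of the deferring period)
         */
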
index 0c0b6e3..e982b5c 100644 (file)
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                break;
                        }
                        dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-                       if (dev) {
+                       if (dev)
                                mreq.imr_ifindex = dev->ifindex;
-                               dev_put(dev);
-                       }
                } else
-                       dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+                       dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
 
 
                err = -EADDRNOTAVAIL;
                if (!dev)
                        break;
+               dev_put(dev);
 
                err = -EINVAL;
                if (sk->sk_bound_dev_if &&
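
Note: this hunk (and the matching ipv6_sockglue.c change further down) switches from __dev_get_by_index(), which is only safe while the RTNL lock or dev_base_lock is held, to the refcounted dev_get_by_index(): the setsockopt path holds neither, so the lookup must pin the device and release it with dev_put() once the ifindex has been recorded. The pattern in isolation:

        struct net_device *dev = dev_get_by_index(net, ifindex);

        if (!dev)
                return -ENODEV;
        /* ... copy out dev->ifindex or other fields ... */
        dev_put(dev);           /* drop the reference taken by the lookup */
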
index 64d0af6..90b2e06 100644 (file)
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
 
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+       u8 res = 0;
+
+       if (seconds > 0) {
+               int period = timeout;
+
+               res = 1;
+               while (seconds > period && res < 255) {
+                       res++;
+                       timeout <<= 1;
+                       if (timeout > rto_max)
+                               timeout = rto_max;
+                       period += timeout;
+               }
+       }
+       return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+       int period = 0;
+
+       if (retrans > 0) {
+               period = timeout;
+               while (--retrans) {
+                       timeout <<= 1;
+                       if (timeout > rto_max)
+                               timeout = rto_max;
+                       period += timeout;
+               }
+       }
+       return period;
+}
+
 /*
  *     Wait for a TCP event.
  *
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
-                       WARN_ON(!(flags & MSG_PEEK));
+                       if (WARN_ON(!(flags & MSG_PEEK)))
+                               printk(KERN_INFO "recvmsg bug 2: copied %X "
+                                      "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                break;
 
        case TCP_DEFER_ACCEPT:
-               icsk->icsk_accept_queue.rskq_defer_accept = 0;
-               if (val > 0) {
-                       /* Translate value in seconds to number of
-                        * retransmits */
-                       while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-                              val > ((TCP_TIMEOUT_INIT / HZ) <<
-                                      icsk->icsk_accept_queue.rskq_defer_accept))
-                               icsk->icsk_accept_queue.rskq_defer_accept++;
-                       icsk->icsk_accept_queue.rskq_defer_accept++;
-               }
+               /* Translate value in seconds to number of retransmits */
+               icsk->icsk_accept_queue.rskq_defer_accept =
+                       secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+                                       TCP_RTO_MAX / HZ);
                break;
 
        case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                        val = (val ? : sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
-               val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-                       ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+               val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+                                     TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
                break;
        case TCP_WINDOW_CLAMP:
                val = tp->window_clamp;
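
Note: secs_to_retrans() and retrans_to_secs() replace the open-coded loop so TCP_DEFER_ACCEPT can be stored as a retransmit count while still being set and read in seconds. The two are not exact inverses because the count covers whole exponential-backoff periods; assuming the usual 3 s initial retransmission timeout and 120 s maximum:

        /* setsockopt(TCP_DEFER_ACCEPT, 10):
         *   secs_to_retrans(10, 3, 120) walks periods 3, 3+6=9, 9+12=21
         *   and returns 3 retransmits (the first period covering >= 10 s).
         * getsockopt() then reports
         *   retrans_to_secs(3, 3, 120) == 3 + 6 + 12 == 21 seconds,
         * i.e. the value is rounded up to a whole backoff period. */
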
index e320afe..4c03598 100644 (file)
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        if (!(flg & TCP_FLAG_ACK))
                return NULL;
 
-       /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-       if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+       /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+       if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-               inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
                inet_rsk(req)->acked = 1;
                return NULL;
        }
index 14f54eb..4f7aaf6 100644 (file)
@@ -496,13 +496,17 @@ done:
                        goto e_inval;
 
                if (val) {
+                       struct net_device *dev;
+
                        if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
                                goto e_inval;
 
-                       if (__dev_get_by_index(net, val) == NULL) {
+                       dev = dev_get_by_index(net, val);
+                       if (!dev) {
                                retv = -ENODEV;
                                break;
                        }
+                       dev_put(dev);
                }
                np->mcast_oif = val;
                retv = 0;
index 51ab497..fc820cd 100644 (file)
@@ -1074,6 +1074,8 @@ restart:
        err = -ECONNREFUSED;
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
+       if (other->sk_shutdown & RCV_SHUTDOWN)
+               goto out_unlock;
 
        if (unix_recvq_full(other)) {
                err = -EAGAIN;
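
Note: the af_unix hunk makes connect() on a SOCK_STREAM socket fail when the listening peer has already shut down its receive side, instead of queueing a connection that can never be served; err is still -ECONNREFUSED from the preceding check when the new test jumps to out_unlock. Seen from userspace, roughly (hypothetical descriptors):

        /* listener side */
        shutdown(listen_fd, SHUT_RD);

        /* client side: with this patch the attempt is rejected immediately */
        if (connect(client_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("connect");      /* ECONNREFUSED */
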
index b7c78a4..7495ce3 100644 (file)
@@ -2717,8 +2717,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
        int r;
        int cpu;
 
-       kvm_init_debug();
-
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
@@ -2785,6 +2783,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
+       kvm_init_debug();
+
        return 0;
 
 out_free:
@@ -2807,7 +2807,6 @@ out_free_0:
 out:
        kvm_arch_exit();
 out_fail:
-       kvm_exit_debug();
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);
@@ -2815,6 +2814,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
        tracepoint_synchronize_unregister();
+       kvm_exit_debug();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
@@ -2824,7 +2824,6 @@ void kvm_exit(void)
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
-       kvm_exit_debug();
        free_cpumask_var(cpus_hardware_enabled);
        __free_page(bad_page);
 }
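
Note: the kvm_main.c reshuffle moves kvm_init_debug() to the very end of kvm_init(), after every step that can fail, and moves kvm_exit_debug() from the init error path into kvm_exit(); the debugfs entries are therefore only created once initialisation is certain to succeed and are torn down exactly once, on module exit. The general ordering rule, sketched with hypothetical helpers:

        static int example_init(void)
        {
                int r;

                r = setup_core();               /* hypothetical, can fail */
                if (r)
                        return r;
                r = setup_hardware();           /* hypothetical, can fail */
                if (r)
                        goto out_core;

                create_debug_entries();         /* user-visible bits go last */
                return 0;

        out_core:
                teardown_core();
                return r;                       /* nothing debug-related to undo */
        }
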