Merge branch 'for-fsnotify' into for-linus
author Al Viro <viro@zeniv.linux.org.uk>
Wed, 3 Mar 2010 22:12:40 +0000 (17:12 -0500)
committer Al Viro <viro@zeniv.linux.org.uk>
Wed, 3 Mar 2010 22:12:40 +0000 (17:12 -0500)
441 files changed:
Documentation/cpu-freq/governors.txt
Documentation/filesystems/sharedsubtree.txt
Documentation/kernel-parameters.txt
Documentation/networking/ip-sysctl.txt
MAINTAINERS
Makefile
arch/arm/include/asm/cacheflush.h
arch/arm/kernel/setup.c
arch/arm/mach-gemini/gpio.c
arch/arm/mach-omap2/mmc-twl4030.c
arch/arm/mach-omap2/mux.c
arch/arm/mm/alignment.c
arch/arm/tools/mach-types
arch/avr32/mach-at32ap/at32ap700x.c
arch/ia64/include/asm/acpi.h
arch/ia64/include/asm/elf.h
arch/ia64/sn/kernel/setup.c
arch/microblaze/include/asm/io.h
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/setup.c
arch/mips/bcm47xx/prom.c
arch/mips/configs/ip27_defconfig
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/traps.c
arch/mips/mm/c-octeon.c
arch/mips/mm/cache.c
arch/mips/mm/highmem.c
arch/mips/sni/rm200.c
arch/parisc/Kconfig
arch/parisc/kernel/pci.c
arch/parisc/kernel/signal.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/smp.c
arch/powerpc/platforms/pseries/xics.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/lowcore.h
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/kernel/dwarf.c
arch/sh/kernel/entry-common.S
arch/sh/kernel/ptrace_64.c
arch/sh/kernel/signal_64.c
arch/sparc/include/asm/stat.h
arch/sparc/kernel/kstack.h
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/tsb.S
arch/um/drivers/mconsole_kern.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/system.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/i8254.c
arch/x86/kvm/x86.c
arch/x86/mm/gup.c
block/blk-core.c
block/cfq-iosched.c
drivers/acpi/dock.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_pdc.c
drivers/acpi/processor_perflib.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/base/class.c
drivers/block/cciss.c
drivers/bluetooth/btmrvl_sdio.c
drivers/char/agp/amd64-agp.c
drivers/char/tpm/tpm_infineon.c
drivers/clocksource/cs5535-clockevt.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/dma/coh901318.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/ioat/dma_v2.c
drivers/dma/ipu/ipu_idmac.c
drivers/edac/amd64_edac.c
drivers/edac/mpc85xx_edac.c
drivers/firewire/net.c
drivers/firewire/ohci.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_grctx.c
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_reg.h
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv04_dac.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nv50_fifo.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nv50_sor.c
drivers/gpu/drm/radeon/Kconfig
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/i2c/busses/i2c-tiny-usb.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/input/input-polldev.c
drivers/input/mouse/psmouse-base.c
drivers/input/serio/i8042.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-stripe.c
drivers/md/dm-sysfs.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/dvb/dvb-core/dmxdev.c
drivers/media/dvb/dvb-core/dvb_demux.c
drivers/media/dvb/dvb-usb/Kconfig
drivers/media/dvb/frontends/l64781.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/bt8xx/bttv-i2c.c
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/mt9t112.c
drivers/media/video/pwc/pwc-ctrl.c
drivers/message/fusion/mptscsih.c
drivers/mmc/card/mmc_test.c
drivers/net/ax88796.c
drivers/net/benet/be_cmds.c
drivers/net/cxgb3/sge.c
drivers/net/e1000/e1000_main.c
drivers/net/igb/igb_main.c
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon_boards.c
drivers/net/sfc/mcdi.c
drivers/net/sfc/qt202x_phy.c
drivers/net/sky2.c
drivers/net/tc35815.c
drivers/net/usb/cdc_ether.c
drivers/net/via-velocity.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/rtl818x/rtl8187_dev.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/wm97xx_battery.c
drivers/regulator/core.c
drivers/regulator/lp3971.c
drivers/s390/cio/qdio_main.c
drivers/s390/scsi/zfcp_fc.c
drivers/scsi/arm/fas216.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/libfcoe.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/serial/8250.c
drivers/ssb/main.c
drivers/usb/core/devio.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/multi.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/fhci-tds.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/otg/Kconfig
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/sierra.c
drivers/usb/storage/unusual_devs.h
drivers/video/efifb.c
drivers/watchdog/bfin_wdt.c
fs/9p/v9fs.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/autofs4/autofs_i.h
fs/autofs4/dev-ioctl.c
fs/autofs4/expire.c
fs/autofs4/inode.c
fs/autofs4/root.c
fs/btrfs/file.c
fs/cachefiles/namei.c
fs/cifs/CHANGES
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/compat_ioctl.c
fs/dcache.c
fs/exec.c
fs/ext4/file.c
fs/gfs2/bmap.c
fs/gfs2/ops_fstype.c
fs/gfs2/ops_inode.c
fs/hpfs/anode.c
fs/hpfs/dentry.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/ea.c
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/map.c
fs/hpfs/name.c
fs/hpfs/namei.c
fs/hppfs/hppfs.c
fs/internal.h
fs/libfs.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/direct.c
fs/nfs/fscache.c
fs/nfs/inode.c
fs/nfs/mount_clnt.c
fs/nfs/nfs2xdr.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/write.c
fs/nfsctl.c
fs/nfsd/export.c
fs/nfsd/nfs4xdr.c
fs/nfsd/vfs.c
fs/nilfs2/dir.c
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/ocfs2/aops.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/cluster/tcp_internal.h
fs/ocfs2/dlm/dlmapi.h
fs/ocfs2/dlm/dlmast.c
fs/ocfs2/dlm/dlmconvert.c
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmlock.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/dlmglue.c
fs/ocfs2/export.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/refcounttree.c
fs/ocfs2/stack_o2cb.c
fs/ocfs2/super.c
fs/ocfs2/symlink.c
fs/ocfs2/uptodate.c
fs/open.c
fs/pnode.c
fs/pnode.h
fs/proc/base.c
fs/proc/generic.c
fs/proc/root.c
fs/reiserfs/inode.c
fs/super.c
fs/sysfs/inode.c
fs/udf/balloc.c
fs/udf/dir.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/symlink.c
fs/ufs/dir.c
fs/ufs/ufs.h
include/drm/nouveau_drm.h
include/drm/vmwgfx_drm.h
include/linux/amba/bus.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/hw_breakpoint.h
include/linux/input.h
include/linux/kfifo.h
include/linux/mnt_namespace.h
include/linux/mount.h
include/linux/perf_event.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
init/do_mounts_initrd.c
init/main.c
ipc/mqueue.c
kernel/audit_tree.c
kernel/hw_breakpoint.c
kernel/kfifo.c
kernel/perf_event.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl_binary.c
kernel/time/timekeeping.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_stack.c
lib/idr.c
mm/filemap.c
mm/migrate.c
mm/oom_kill.c
net/9p/client.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/rfcomm/core.c
net/core/dev.c
net/core/dst.c
net/core/ethtool.c
net/core/net-sysfs.c
net/core/pktgen.c
net/dccp/ccid.c
net/dccp/ccid.h
net/dccp/probe.c
net/ipv4/devinet.c
net/ipv4/igmp.c
net/ipv4/ipcomp.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/ipv6/ipcomp6.c
net/ipv6/netfilter/ip6_tables.c
net/irda/irnet/irnet_ppp.c
net/key/af_key.c
net/mac80211/ibss.c
net/mac80211/rate.c
net/mac80211/scan.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netlink/af_netlink.c
net/sched/Kconfig
net/sunrpc/rpc_pipe.c
net/xfrm/xfrm_state.c
security/smack/smack_lsm.c
security/tomoyo/realpath.c
sound/pci/hda/hda_intel.c
tools/perf/builtin-top.c
tools/perf/util/event.c
tools/perf/util/probe-event.c

index aed082f..737988f 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency.  For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.  
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
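The hunk above changes the documented default of up_threshold from 80 to 95, matching the governor's new default. As a rough illustration only (it assumes the ondemand governor is active and exposes its global tunables at the usual sysfs location; the program itself is hypothetical and not part of this merge), the value can be adjusted from userspace like this:

/* Hypothetical userspace sketch: set the ondemand governor's up_threshold.
 * The sysfs path is an assumption; per-policy layouts may differ. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/ondemand/up_threshold";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Require roughly 95% average load between samples before the
	 * governor raises the frequency, as described in the text above. */
	fprintf(f, "95\n");
	fclose(f);
	return 0;
}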
index 23a1810..fc0e39a 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -837,6 +837,9 @@ replicas continue to be exactly same.
         individual lists does not affect propagation or the way propagation
         tree is modified by operations.
 
+       All vfsmounts in a peer group have the same ->mnt_master.  If it is
+       non-NULL, they form a contiguous (ordered) segment of slave list.
+
        A example propagation tree looks as shown in the figure below.
        [ NOTE: Though it looks like a forest, if we consider all the shared
        mounts as a conceptual entity called 'pnode', it becomes a tree]
@@ -874,8 +877,19 @@ replicas continue to be exactly same.
 
        NOTE: The propagation tree is orthogonal to the mount tree.
 
+8B Locking:
+
+       ->mnt_share, ->mnt_slave, ->mnt_slave_list, ->mnt_master are protected
+       by namespace_sem (exclusive for modifications, shared for reading).
+
+       Normally we have ->mnt_flags modifications serialized by vfsmount_lock.
+       There are two exceptions: do_add_mount() and clone_mnt().
+       The former modifies a vfsmount that has not been visible in any shared
+       data structures yet.
+       The latter holds namespace_sem and the only references to vfsmount
+       are in lists that can't be traversed without namespace_sem.
 
-8B Algorithm:
+8C Algorithm:
 
        The crux of the implementation resides in rbind/move operation.
 
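The paragraph added in the hunk above states an invariant: every vfsmount in a peer group has the same ->mnt_master. A minimal sketch of what that invariant means in code, assuming the 2.6.33-era struct vfsmount field names (->mnt_share ring, ->mnt_master pointer); the helper itself is hypothetical and is not part of this merge:

/* Illustrative sketch only -- not kernel code from this merge.
 * Checks that all peers of @mnt (its ->mnt_share ring) share @mnt's
 * ->mnt_master.  Caller must hold namespace_sem, at least shared,
 * as required by the locking rules documented above. */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mount.h>

static bool peer_group_masters_consistent(struct vfsmount *mnt)
{
	struct vfsmount *p;

	list_for_each_entry(p, &mnt->mnt_share, mnt_share)
		if (p->mnt_master != mnt->mnt_master)
			return false;
	return true;
}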
index 736d456..e7848a0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        acpi_display_output=video
                        See above.
 
+       acpi_early_pdc_eval     [HW,ACPI] Evaluate processor _PDC methods
+                               early. Needed on some platforms to properly
+                               initialize the EC.
+
        acpi_irq_balance [HW,ACPI]
                        ACPI will balance active IRQs
                        default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
        aic79xx=        [HW,SCSI]
                        See Documentation/scsi/aic79xx.txt.
 
+       alignment=      [KNL,ARM]
+                       Allow the default userspace alignment fault handler
+                       behaviour to be specified.  Bit 0 enables warnings,
+                       bit 1 enables fixups, and bit 2 sends a segfault.
+
        amd_iommu=      [HW,X86-84]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
index 006b39d..e87f3cd 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
        Default: 5
 
 max_addresses - INTEGER
-       Number of maximum addresses per interface.  0 disables limitation.
-       It is recommended not set too large value (or 0) because it would
-       be too easy way to crash kernel to allow to create too much of
-       autoconfigured addresses.
+       Maximum number of autoconfigured addresses per interface.  Setting
+       to zero disables the limitation.  It is not recommended to set this
+       value too large (or to zero) because it would be an easy way to
+       crash the kernel by allowing too many addresses to be created.
        Default: 16
 
 disable_ipv6 - BOOLEAN
index 03f38c1..2533fc4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M:        Richard Purdie <rpurdie@rpsys.net>
 S:     Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M:     Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:     Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://gitorious.org/linux-gemini/mainline.git
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T:  topgit git://git.openezx.org/openezx.git
 F:     arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M:     Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:     Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F:       include/linux/tfrc.h
 F:     net/dccp/
 
 DECnet NETWORK LAYER
-M:     Christine Caulfield <christine.caulfield@googlemail.com>
 W:     http://linux-decnet.sourceforge.net
 L:     linux-decnet-user@lists.sourceforge.net
-S:     Maintained
+S:     Orphan
 F:     Documentation/networking/decnet.txt
 F:     net/decnet/
 
@@ -3411,8 +3410,10 @@ S:       Maintained
 F:     drivers/scsi/sym53c8xx_2/
 
 LTP (Linux Test Project)
-M:     Subrata Modak <subrata@linux.vnet.ibm.com>
-M:     Mike Frysinger <vapier@gentoo.org>
+M:     Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M:     Garrett Cooper <yanegomi@gmail.com>
+M:     Mike Frysinger <vapier@gentoo.org>
+M:     Subrata Modak <subrata@linux.vnet.ibm.com>
 L:     ltp-list@lists.sourceforge.net (subscribers-only)
 W:     http://ltp.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3488,9 +3489,9 @@ S:        Maintained
 F:     drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M:     Lennert Buytenhek <buytenh@marvell.com>
+M:     Lennert Buytenhek <buytenh@wantstofly.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/net/mv643xx_eth.*
 F:     include/linux/mv643xx.h
 
@@ -3836,6 +3837,7 @@ NETWORKING DRIVERS
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:     Odd Fixes
 F:     drivers/net/
 F:     include/linux/if_*
index f8e02e9..1b24895 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
index c77d2fa..8113bb5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
index c6c57b6..621acad 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {
index e726385..fe3bd5a 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
        unsigned int reg_both, reg_level, reg_type;
 
        reg_type = __raw_readl(base + GPIO_INT_TYPE);
-       reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+       reg_level = __raw_readl(base + GPIO_INT_LEVEL);
        reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
        switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
        }
 
        __raw_writel(reg_type, base + GPIO_INT_TYPE);
-       __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+       __raw_writel(reg_level, base + GPIO_INT_LEVEL);
        __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
        gpio_ack_irq(irq);
index 0c3c72d..8afe9dd 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
 {
        struct twl4030_hsmmc_info *c;
        int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
+       int i;
 
        if (cpu_is_omap2430()) {
                control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
@@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
                mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
                if (!mmc) {
                        pr_err("Cannot allocate memory for mmc device!\n");
-                       return;
+                       goto done;
                }
 
                if (c->name)
@@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
                        continue;
                c->dev = mmc->dev;
        }
+
+done:
+       for (i = 0; i < nr_hsmmc; i++)
+               kfree(hsmmc_data[i]);
 }
 
 #endif
index 5fedc50..5fef73f 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -961,16 +961,14 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
        while (superset->reg_offset !=  OMAP_MUX_TERMINATOR) {
                struct omap_mux *entry;
 
-#ifndef CONFIG_OMAP_MUX
-               /* Skip pins that are not muxed as GPIO by bootloader */
-               if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+#ifdef CONFIG_OMAP_MUX
+               if (!superset->muxnames || !superset->muxnames[0]) {
                        superset++;
                        continue;
                }
-#endif
-
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
-               if (!superset->muxnames || !superset->muxnames[0]) {
+#else
+               /* Skip pins that are not muxed as GPIO by bootloader */
+               if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
                        superset++;
                        continue;
                }
index b270d62..62820ed 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/moduleparam.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+core_param(alignment, ai_usermode, int, 0600);
+
 #define UM_WARN                (1 << 0)
 #define UM_FIXUP       (1 << 1)
 #define UM_SIGNAL      (1 << 2)
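The core_param() line added above exposes ai_usermode as the alignment= boot parameter described in the kernel-parameters.txt hunk earlier; the UM_* bits give the mask its meaning. As a small, hypothetical userspace sketch of how such a bit mask decodes (the macro values mirror the hunk; nothing below is kernel code from this merge):

/* Decode an alignment= style bit mask: bit 0 warn, bit 1 fix up,
 * bit 2 raise a fault signal, per the documentation hunk above. */
#include <stdio.h>

#define UM_WARN   (1 << 0)	/* bit 0: log a warning */
#define UM_FIXUP  (1 << 1)	/* bit 1: fix up the access in the kernel */
#define UM_SIGNAL (1 << 2)	/* bit 2: fault the offending task */

int main(void)
{
	int ai_usermode = 3;	/* e.g. booting with alignment=3 */

	printf("warn=%d fixup=%d signal=%d\n",
	       !!(ai_usermode & UM_WARN),
	       !!(ai_usermode & UM_FIXUP),
	       !!(ai_usermode & UM_SIGNAL));
	return 0;
}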
index 5a79fc6..31c2f4c 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Thu Jan 28 22:15:54 2010
+# Last update: Sat Feb 20 14:16:15 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2257,7 +2257,7 @@ oratisalog                MACH_ORATISALOG         ORATISALOG              2268
 oratismadi             MACH_ORATISMADI         ORATISMADI              2269
 oratisot16             MACH_ORATISOT16         ORATISOT16              2270
 oratisdesk             MACH_ORATISDESK         ORATISDESK              2271
-v2_ca9                 MACH_V2P_CA9            V2P_CA9                 2272
+vexpress               MACH_VEXPRESS           VEXPRESS                2272
 sintexo                        MACH_SINTEXO            SINTEXO                 2273
 cm3389                 MACH_CM3389             CM3389                  2274
 omap3_cio              MACH_OMAP3_CIO          OMAP3_CIO               2275
@@ -2636,3 +2636,45 @@ hw90240                  MACH_HW90240            HW90240                 2648
 dm365_leopard          MACH_DM365_LEOPARD      DM365_LEOPARD           2649
 mityomapl138           MACH_MITYOMAPL138       MITYOMAPL138            2650
 scat110                        MACH_SCAT110            SCAT110                 2651
+acer_a1                        MACH_ACER_A1            ACER_A1                 2652
+cmcontrol              MACH_CMCONTROL          CMCONTROL               2653
+pelco_lamar            MACH_PELCO_LAMAR        PELCO_LAMAR             2654
+rfp43                  MACH_RFP43              RFP43                   2655
+sk86r0301              MACH_SK86R0301          SK86R0301               2656
+ctpxa                  MACH_CTPXA              CTPXA                   2657
+epb_arm9_a             MACH_EPB_ARM9_A         EPB_ARM9_A              2658
+guruplug               MACH_GURUPLUG           GURUPLUG                2659
+spear310               MACH_SPEAR310           SPEAR310                2660
+spear320               MACH_SPEAR320           SPEAR320                2661
+robotx                 MACH_ROBOTX             ROBOTX                  2662
+lsxhl                  MACH_LSXHL              LSXHL                   2663
+smartlite              MACH_SMARTLITE          SMARTLITE               2664
+cws2                   MACH_CWS2               CWS2                    2665
+m619                   MACH_M619               M619                    2666
+smartview              MACH_SMARTVIEW          SMARTVIEW               2667
+lsa_salsa              MACH_LSA_SALSA          LSA_SALSA               2668
+kizbox                 MACH_KIZBOX             KIZBOX                  2669
+htccharmer             MACH_HTCCHARMER         HTCCHARMER              2670
+guf_neso_lt            MACH_GUF_NESO_LT        GUF_NESO_LT             2671
+pm9g45                 MACH_PM9G45             PM9G45                  2672
+htcpanther             MACH_HTCPANTHER         HTCPANTHER              2673
+htcpanther_cdma                MACH_HTCPANTHER_CDMA    HTCPANTHER_CDMA         2674
+reb01                  MACH_REB01              REB01                   2675
+aquila                 MACH_AQUILA             AQUILA                  2676
+spark_sls_hw2          MACH_SPARK_SLS_HW2      SPARK_SLS_HW2           2677
+sheeva_esata           MACH_ESATA_SHEEVAPLUG   ESATA_SHEEVAPLUG        2678
+surf7x30               MACH_SURF7X30           SURF7X30                2679
+micro2440              MACH_MICRO2440          MICRO2440               2680
+am2440                 MACH_AM2440             AM2440                  2681
+tq2440                 MACH_TQ2440             TQ2440                  2682
+lpc2478oem             MACH_LPC2478OEM         LPC2478OEM              2683
+ak880x                 MACH_AK880X             AK880X                  2684
+cobra3530              MACH_COBRA3530          COBRA3530               2685
+pmppb                  MACH_PMPPB              PMPPB                   2686
+u6715                  MACH_U6715              U6715                   2687
+axar1500_sender                MACH_AXAR1500_SENDER    AXAR1500_SENDER         2688
+g30_dvb                        MACH_G30_DVB            G30_DVB                 2689
+vc088x                 MACH_VC088X             VC088X                  2690
+mioa702                        MACH_MIOA702            MIOA702                 2691
+hpmin                  MACH_HPMIN              HPMIN                   2692
+ak880xak               MACH_AK880XAK           AK880XAK                2693
index 1aa1ea5..b13d187 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
        struct platform_device          *pdev;
-       struct mci_dma_slave            *slave;
+       struct mci_dma_data             *slave;
        u32                             pioa_mask;
        u32                             piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                                ARRAY_SIZE(atmel_mci0_resource)))
                goto fail;
 
-       slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+       slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+       if (!slave)
+               goto fail;
 
        slave->sdata.dma_dev = &dw_dmac0_device.dev;
        slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
        if (platform_device_add_data(pdev, data,
                                sizeof(struct mci_platform_data)))
-               goto fail;
+               goto fail_free;
 
        /* CLK line is common to both slots */
        pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                /* Slot is unused */
                break;
        default:
-               goto fail;
+               goto fail_free;
        }
 
        select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                break;
        default:
                if (!data->slot[0].bus_width)
-                       goto fail;
+                       goto fail_free;
 
                data->slot[1].bus_width = 0;
                break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
        platform_device_add(pdev);
        return pdev;
 
+fail_free:
+       kfree(slave);
 fail:
        data->dma_slave = NULL;
-       kfree(slave);
        platform_device_put(pdev);
        return NULL;
 }
index 7ae5889..e97b255 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0      /* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
index e14108b..4c41656 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
    relevant until we have real hardware to play with... */
 #define ELF_PLATFORM   NULL
 
-#define SET_PERSONALITY(ex)    set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex)    \
+       set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
 #define elf_read_implies_exec(ex, executable_stack)                                    \
        ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
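The new SET_PERSONALITY above keeps the task's personality flag bits and replaces only the base value with PER_LINUX, where the old set_personality(PER_LINUX) wiped the flags as well. A hedged userspace illustration of the same masking idiom, using the standard constants from <sys/personality.h> (the demo program itself is hypothetical and not part of this merge):

/* Show how masking out PER_MASK preserves flag bits such as
 * ADDR_NO_RANDOMIZE while forcing the base personality to PER_LINUX. */
#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	unsigned int cur = personality(0xffffffff);	/* query current value */
	unsigned int updated = (cur & ~PER_MASK) | PER_LINUX;

	/* Only the base personality changes; flag bits survive. */
	printf("old=%#x new=%#x\n", cur, updated);
	return 0;
}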
index ece1bf9..e456f06 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
index fc9997b..267c7c7 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */
 
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
index d9d6383..2a56bcc 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do {                                                                      \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
 do {                                                                   \
-       int step = -line_length;                                        \
-       int count = end - start;                                        \
-       BUG_ON(count <= 0);                                             \
+       int volatile temp;                                              \
+       BUG_ON(end - start <= 0);                                       \
                                                                        \
-       __asm__ __volatile__ (" 1:      addk    %0, %0, %1;             \
-                                       " #op " %0, r0;                 \
-                                       bgtid   %1, 1b;                 \
-                                       addk    %1, %1, %2;             \
-                                       " : : "r" (start), "r" (count), \
-                                       "r" (step) : "memory");         \
+       __asm__ __volatile__ (" 1:      " #op " %1, r0;                 \
+                                       cmpu    %0, %1, %2;             \
+                                       bgtid   %0, 1b;                 \
+                                       addk    %1, %1, %3;             \
+                               " : : "r" (temp), "r" (start), "r" (end),\
+                                       "r" (line_length) : "memory");  \
 } while (0);
 
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
        pr_debug("%s\n", __func__);
        CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                        wdc.clear)
-
-#if 0
-       unsigned int i;
-
-       pr_debug("%s\n", __func__);
-
-       /* Just loop through cache size and invalidate it */
-       for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
-                       __invalidate_dcache(0, i);
-#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
index 5372b24..bb8c4b9 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
 
        microblaze_cache_init();
 
+       invalidate_dcache();
        enable_dcache();
 
        invalidate_icache();
index c51405e..29d3cbf 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
                        break;
        }
 
+       /* Ignoring the last page when ddr size is 128M. Cached
+        * accesses to last page is causing the processor to prefetch
+        * using address above 128M stepping out of the ddr address
+        * space.
+        */
+       if (mem == 0x8000000)
+               mem -= 0x1000;
+
        add_memory_region(0, mem, BOOT_MEM_RAM);
 }
 
index ed84b4c..84b6503 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc2
-# Tue Aug  7 13:04:24 2007
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb  3 18:12:31 2010
 #
 CONFIG_MIPS=y
 
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
 # Machine selection
 #
 # CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 CONFIG_SGI_IP27=y
+# CONFIG_SGI_IP28 is not set
 # CONFIG_SGI_IP32 is not set
 # CONFIG_SIBYTE_CRHINE is not set
 # CONFIG_SIBYTE_CARMEL is not set
@@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y
 # CONFIG_SIBYTE_SENTOSA is not set
 # CONFIG_SIBYTE_BIGSUR is not set
 # CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
 CONFIG_SGI_SN_M_MODE=y
 # CONFIG_SGI_SN_N_MODE is not set
 # CONFIG_MAPPED_KERNEL is not set
 # CONFIG_REPLICATE_KTEXT is not set
 # CONFIG_REPLICATE_EXHANDLERS is not set
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_ARC=y
 CONFIG_DMA_COHERENT=y
-CONFIG_EARLY_PRINTK=y
 CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_NO_IOPORT is not set
 CONFIG_CPU_BIG_ENDIAN=y
 # CONFIG_CPU_LITTLE_ENDIAN is not set
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_DEFAULT_SGI_PARTITION=y
 CONFIG_MIPS_L1_CACHE_SHIFT=7
 CONFIG_ARC64=y
 CONFIG_BOOT_ELF64=y
@@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y
 #
 # CPU selection
 #
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
 # CONFIG_CPU_MIPS32_R1 is not set
 # CONFIG_CPU_MIPS32_R2 is not set
 # CONFIG_CPU_MIPS64_R1 is not set
@@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_R10000=y
 CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
@@ -99,6 +117,7 @@ CONFIG_64BIT=y
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
 # CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_CPU_HAS_PREFETCH=y
 CONFIG_MIPS_MT_DISABLED=y
@@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_IRQ_PER_CPU=y
 CONFIG_CPU_SUPPORTS_HIGHMEM=y
 CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_NUMA=y
 CONFIG_SYS_SUPPORTS_NUMA=y
 CONFIG_NODES_SHIFT=6
@@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y
 CONFIG_DISCONTIGMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_NEED_MULTIPLE_NODES=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_SMP=y
 CONFIG_SYS_SUPPORTS_SMP=y
 CONFIG_NR_CPUS_DEFAULT_64=y
 CONFIG_NR_CPUS=64
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 # CONFIG_HZ_48 is not set
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_128 is not set
@@ -142,13 +168,13 @@ CONFIG_HZ=1000
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
-CONFIG_PREEMPT_BKL=y
 # CONFIG_MIPS_INSANE_LARGE is not set
 # CONFIG_KEXEC is not set
 CONFIG_SECCOMP=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -162,20 +188,41 @@ CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
+# CONFIG_GROUP_SCHED is not set
 CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED=y
+CONFIG_PROC_PID_CPUSET=y
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
@@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_SYSCALL_WRAPPERS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_CGROUP is not set
+CONFIG_BLOCK_COMPAT=y
 
 #
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
 # CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_LEGACY is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
 CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
 # CONFIG_PCCARD is not set
 # CONFIG_HOTPLUG_PCI is not set
 
@@ -242,8 +336,9 @@ CONFIG_MMU=y
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
-# CONFIG_BUILD_ELF64 is not set
 CONFIG_MIPS32_COMPAT=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
@@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y
 # Power management options
 #
 CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
-
-#
-# Networking
-#
+# CONFIG_PM_RUNTIME is not set
 CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
 
 #
 # Networking options
@@ -273,6 +365,8 @@ CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -292,19 +386,40 @@ CONFIG_IP_PNP=y
 # CONFIG_INET_ESP is not set
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 # CONFIG_NETFILTER is not set
 # CONFIG_IP_DCCP is not set
@@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m
 # CONFIG_SCTP_HMAC_NONE is not set
 # CONFIG_SCTP_HMAC_SHA1 is not set
 CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
 
 #
 # Queueing/Scheduling
@@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+CONFIG_NET_SCH_MULTIQ=y
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m
 CONFIG_NET_SCH_GRED=m
 CONFIG_NET_SCH_DSMARK=m
 CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
 CONFIG_NET_SCH_INGRESS=m
 
 #
@@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
 # CONFIG_NET_EMATCH is not set
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_ACT_GACT=m
 CONFIG_GACT_PROB=y
 CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 # CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_ACT_SKBEDIT=m
 # CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-CONFIG_CFG80211=m
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
 CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
 CONFIG_MAC80211=m
-# CONFIG_MAC80211_DEBUG is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
 CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
 # CONFIG_NET_9P is not set
 
 #
@@ -408,9 +545,13 @@ CONFIG_RFKILL=m
 #
 # Generic Driver Options
 #
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
 # CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_OSD=m
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_RAM is not set
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 # CONFIG_MISC_DEVICES is not set
+CONFIG_EEPROM_93CX6=m
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 # CONFIG_SCSI_MULTI_LUN is not set
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
@@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_ISCSI_ATTRS=m
 CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_HOST_SMP=y
 # CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
 # CONFIG_SCSI_3W_9XXX is not set
+CONFIG_SCSI_3W_SAS=m
 # CONFIG_SCSI_ACARD is not set
 # CONFIG_SCSI_AACRAID is not set
 # CONFIG_SCSI_AIC7XXX is not set
@@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_SCSI_AIC79XX is not set
 CONFIG_SCSI_AIC94XX=m
 # CONFIG_AIC94XX_DEBUG is not set
+CONFIG_SCSI_MVSAS=m
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_DPT_I2O=m
+# CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT2SAS_LOGGING is not set
 # CONFIG_SCSI_HPTIOP is not set
+CONFIG_LIBFC=m
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
@@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y
 # CONFIG_SCSI_DC395x is not set
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_PMCRAID=m
+# CONFIG_SCSI_PM8001 is not set
 # CONFIG_SCSI_SRP is not set
+CONFIG_SCSI_BFA_FC=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_SCSI_OSD_DPRINT_SENSE=1
+# CONFIG_SCSI_OSD_DEBUG is not set
 # CONFIG_ATA is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=y
 CONFIG_MD_RAID1=y
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=y
-CONFIG_MD_RAID5_RESHAPE=y
+# CONFIG_MULTICORE_RAID456 is not set
+CONFIG_MD_RAID6_PQ=y
+# CONFIG_ASYNC_RAID6_TEST is not set
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
@@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
 # CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_FUSION is not set
 
 #
-# Fusion MPT device support
+# IEEE 1394 (FireWire) support
 #
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
 
 #
-# IEEE 1394 (FireWire) support
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
 #
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-CONFIG_NETDEVICES_MULTIQUEUE=y
 CONFIG_IFB=m
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
+CONFIG_VETH=m
 # CONFIG_ARCNET is not set
-CONFIG_PHYLIB=m
+CONFIG_PHYLIB=y
 
 #
 # MII PHY device drivers
@@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m
 CONFIG_SMSC_PHY=m
 # CONFIG_BROADCOM_PHY is not set
 CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
 # CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
 CONFIG_AX88796=m
+CONFIG_AX88796_93CX6=y
 CONFIG_SGI_IOC3_ETH=y
 # CONFIG_HAPPYMEAL is not set
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_SMC91X=m
 # CONFIG_DM9000 is not set
+CONFIG_ETHOC=m
+CONFIG_SMSC911X=m
+CONFIG_DNET=m
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_NET_PCI is not set
+CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
+CONFIG_KS8842=m
+CONFIG_KS8851_MLL=m
+CONFIG_ATL2=m
 CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
+CONFIG_E1000E=m
+CONFIG_IP1000=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
@@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y
 # CONFIG_SKY2 is not set
 CONFIG_VIA_VELOCITY=m
 # CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
 CONFIG_QLA3XXX=m
 # CONFIG_ATL1 is not set
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_JME=m
 CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
 # CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
 CONFIG_CHELSIO_T3=m
+CONFIG_ENIC=m
+CONFIG_IXGBE=m
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
+CONFIG_VXGE=m
+# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
 # CONFIG_MYRI10GE is not set
 CONFIG_NETXEN_NIC=m
-# CONFIG_MLX4_CORE is not set
+CONFIG_NIU=m
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_CORE=m
+# CONFIG_MLX4_DEBUG is not set
+CONFIG_TEHUTI=m
+CONFIG_BNX2X=m
+CONFIG_QLGE=m
+CONFIG_SFC=m
+CONFIG_BE2NET=m
 # CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
+CONFIG_WLAN=y
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_RTL8180=m
+CONFIG_ADM8211=m
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_MWL8K=m
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K=m
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+# CONFIG_B43LEGACY_DEBUG is not set
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
 CONFIG_IPW2100_DEBUG=y
@@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y
 CONFIG_IPW2200_PROMISCUOUS=y
 CONFIG_IPW2200_QOS=y
 CONFIG_IPW2200_DEBUG=y
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLAGN=m
+CONFIG_IWL4965=y
+CONFIG_IWL5000=y
+CONFIG_IWL3945=m
+CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y
 CONFIG_LIBERTAS=m
 # CONFIG_LIBERTAS_DEBUG is not set
 CONFIG_HERMES=m
+# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
 CONFIG_PLX_HERMES=m
 CONFIG_TMD_HERMES=m
 CONFIG_NORTEL_HERMES=m
 CONFIG_PCI_HERMES=m
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
-CONFIG_BCM43XX=m
-CONFIG_BCM43XX_DEBUG=y
-CONFIG_BCM43XX_DMA=y
-CONFIG_BCM43XX_PIO=y
-CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
-# CONFIG_BCM43XX_DMA_MODE is not set
-# CONFIG_BCM43XX_PIO_MODE is not set
+CONFIG_P54_COMMON=m
+CONFIG_P54_PCI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI_PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_HT=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WL12XX=m
+CONFIG_WL1251=m
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
 # CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
 # CONFIG_ISDN is not set
 # CONFIG_PHONE is not set
 
@@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y
 # CONFIG_SERIO_PCIPS2 is not set
 CONFIG_SERIO_LIBPS2=m
 CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
 # CONFIG_GAMEPORT is not set
 
 #
 # Character devices
 #
 # CONFIG_VT is not set
+CONFIG_DEVKMEM=y
 # CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_NOZOMI=m
 
 #
 # Serial drivers
@@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
 CONFIG_HW_RANDOM=m
-# CONFIG_RTC is not set
+CONFIG_HW_RANDOM_TIMERIOMEM=m
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
-# CONFIG_I2C is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
 
 #
-# SPI support
+# I2C system bus drivers (mostly embedded / system-on-chip)
 #
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_SIMTEC=m
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_STUB=m
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
 # CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=m
+# CONFIG_PPS_DEBUG is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
+CONFIG_THERMAL=m
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_MIPS is not set
 
 #
-# Multimedia devices
+# Multifunction device drivers
 #
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+CONFIG_MFD_WM8350=m
+CONFIG_MFD_WM8350_I2C=m
+CONFIG_MFD_PCF50633=m
+CONFIG_PCF50633_ADC=m
+CONFIG_PCF50633_GPIO=m
+CONFIG_AB3100_CORE=m
+CONFIG_AB3100_OTP=m
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
 
 #
 # Graphics support
 #
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_DRM is not set
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
 # CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
-# Sound
+# Display device support
 #
+# CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_SOUND is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
 # CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
 #
 
 #
-# USB Gadget Support
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_INFINIBAND is not set
-# CONFIG_RTC_CLASS is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_PCA955X=m
+CONFIG_LEDS_WM8350=m
+CONFIG_LEDS_BD2802=m
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 
 #
-# DMA Engine support
+# iptables trigger is under Netfilter config (LED target)
 #
-# CONFIG_DMA_ENGINE is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
 
 #
-# DMA Clients
+# RTC interfaces
 #
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
 
 #
-# DMA Devices
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+
 #
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+CONFIG_RTC_DRV_M48T35=y
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_WM8350 is not set
+# CONFIG_RTC_DRV_PCF50633 is not set
+CONFIG_RTC_DRV_AB3100=m
 
 #
-# Userspace I/O
+# on-CPU RTC drivers
 #
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
 CONFIG_UIO=y
 # CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+CONFIG_UIO_SMX=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT2_FS_SECURITY=y
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
 CONFIG_JBD=y
-CONFIG_JBD_DEBUG=y
+CONFIG_JBD2=y
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
 CONFIG_XFS_POSIX_ACL=y
 # CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
+CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
 CONFIG_AUTOFS_FS=m
 # CONFIG_AUTOFS4_FS is not set
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_GENERIC_ACL=y
 
 #
+# Caches
+#
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
 # CD-ROM/DVD Filesystems
 #
 # CONFIG_ISO9660_FS is not set
@@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
 CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_ECRYPT_FS is not set
@@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
 # CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_OMFS_FS=m
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_EXOFS_FS=m
+# CONFIG_EXOFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
 # CONFIG_ROOT_NFS is not set
+# CONFIG_NFSD is not set
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-# CONFIG_SUNRPC_BIND34 is not set
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y
 # CONFIG_KARMA_PARTITION is not set
 # CONFIG_EFI_PARTITION is not set
 # CONFIG_SYSV68_PARTITION is not set
-
-#
-# Native Language Support
-#
 # CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
 CONFIG_DLM=m
 # CONFIG_DLM_DEBUG is not set
 
 #
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
 # Kernel hacking
 #
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
 # CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
 # CONFIG_CMDLINE_BOOL is not set
 
 #
@@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y
 CONFIG_KEYS=y
 CONFIG_KEYS_DEBUG_PROC_KEYS=y
 # CONFIG_SECURITY is not set
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
+CONFIG_SECURITYFS=y
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=y
+CONFIG_ASYNC_MEMCPY=y
+CONFIG_ASYNC_XOR=y
+CONFIG_ASYNC_PQ=y
+CONFIG_ASYNC_RAID6_RECOV=y
 CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ABLKCIPHER=m
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_VMAC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_GHASH=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
 CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_HIFN_795X=m
+# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
 CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
+CONFIG_CRC7=m
 CONFIG_LIBCRC32C=m
 CONFIG_ZLIB_INFLATE=m
 CONFIG_ZLIB_DEFLATE=m
-CONFIG_PLIST=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
index 9c187a6..758ad42 100644 (file)
@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 {
 #ifdef __NEED_VMBITS_PROBE
-       write_c0_entryhi(0x3ffffffffffff000ULL);
+       write_c0_entryhi(0x3fffffffffffe000ULL);
        back_to_back_c0_hazard();
-       c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL);
+       c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
 #endif
 }
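
An illustrative userspace sketch, not part of the patch: how the value read back from c0_entryhi turns into a vmbits count. fls64() returns the 1-based position of the most significant set bit; __builtin_clzll stands in for the kernel helper here, and the "readback" value simply assumes every written VPN2 bit was kept.

#include <stdio.h>
#include <stdint.h>

/* 1-based index of the most significant set bit, 0 for x == 0. */
static int fls64_demo(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	/* Pretend the CPU kept all written VPN2 bits. */
	uint64_t readback = 0x3fffffffffffe000ULL;

	printf("vmbits = %d\n", fls64_demo(readback));	/* prints 62 */
	return 0;
}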
 
index 338dfe8..31b204b 100644 (file)
@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
                        cp0_perfcount_irq = -1;
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+               cp0_compare_irq_shift = cp0_compare_irq;
                cp0_perfcount_irq = -1;
        }
 
index 94e05e5..e06f1af 100644 (file)
@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
  * Probe Octeon's caches
  *
  */
-static void __devinit probe_octeon(void)
+static void __cpuinit probe_octeon(void)
 {
        unsigned long icache_size;
        unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
  * Setup the Octeon cache flush routines
  *
  */
-void __devinit octeon_cache_init(void)
+void __cpuinit octeon_cache_init(void)
 {
        extern unsigned long ebase;
        extern char except_vec2_octeon;
index 102b2df..e716caf 100644 (file)
@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
        protection_map[15] = PAGE_SHARED;
 }
 
-void __devinit cpu_cache_init(void)
+void __cpuinit cpu_cache_init(void)
 {
        if (cpu_has_3k_cache) {
                extern void __weak r3k_cache_init(void);
index e274fda..127d732 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
index 46f0069..31e2583 100644 (file)
@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
        if (!rm200_pic_master)
                return;
        rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
-       if (!rm200_pic_master) {
+       if (!rm200_pic_slave) {
                iounmap(rm200_pic_master);
                return;
        }
index 524d935..f388dc6 100644 (file)
@@ -18,7 +18,6 @@ config PARISC
        select BUG
        select HAVE_PERF_EVENTS
        select GENERIC_ATOMIC64 if !64BIT
-       select HAVE_ARCH_TRACEHOOK
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
          in many of their workstations & servers (HP9000 700 and 800 series,
index f7064ab..9e74bfe 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <asm/io.h>
 #include <asm/system.h>
-#include <asm/cache.h>         /* for L1_CACHE_BYTES */
 #include <asm/superio.h>
 
 #define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
        } else {
                printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
        }
+
+       /* Set the CLS for PCI as early as possible. */
+       pci_cache_line_size = pci_dfl_cache_line_size;
+
        return 0;
 }
 
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
        ** upper byte is PCI_LATENCY_TIMER.
        */
        pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
-                               (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+                             (0x80 << 8) | pci_cache_line_size);
 }
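
For reference, the 16-bit write above packs the latency timer into the upper byte and the cache line size (counted in 32-bit words) into the lower byte. A userspace sketch with a made-up CLS value, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t  cls_dwords = 32;			/* hypothetical: 128-byte line / sizeof(u32) */
	uint16_t word = (0x80 << 8) | cls_dwords;	/* 0x80 = latency timer */

	printf("PCI_CACHE_LINE_SIZE word = 0x%04x\n", word);	/* 0x8020 */
	return 0;
}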
 
 
index fb37ac5..35c827e 100644 (file)
@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       tracehook_signal_handler(sig, info, ka, regs, 0);
+       tracehook_signal_handler(sig, info, ka, regs, 
+               test_thread_flag(TIF_SINGLESTEP) ||
+               test_thread_flag(TIF_BLOCKSTEP));
 
        return 1;
 }
index 282d930..1ec0657 100644 (file)
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
                psize = get_slice_psize(mm, addr);
+               /* Mask the address for the correct page size */
+               addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-       } else
+       } else {
                psize = pte_pagesize_index(mm, addr, pte);
+               /* Mask the address for the standard page size.  If we
+                * have a 64k page kernel, but the hardware does not
+                * support 64k pages, this might be different from the
+                * hardware page size encoded in the slice table. */
+               addr &= PAGE_MASK;
+       }
 
-       /* Mask the address for the correct page size */
-       addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 
        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
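
The comments in the hunk hinge on masking the address with the right page size: the huge path keeps the slice-derived shift, while the regular path now uses PAGE_MASK because the hardware page can be smaller than what the slice table encodes. A small userspace sketch of the mask arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x12345678UL;
	unsigned int shift = 16;				/* e.g. a 64K page */
	unsigned long masked = addr & ~((1UL << shift) - 1);

	printf("0x%lx masked to 0x%lx\n", addr, masked);	/* 0x12340000 */
	return 0;
}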
index 21f61b8..cc29c0f 100644 (file)
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
        }
 
        mpic = mpic_alloc(np, r.start,
-                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+                       MPIC_BROKEN_FRR_NIRQS,
                        0, 256, " OpenPIC  ");
        BUG_ON(mpic == NULL);
        of_node_put(np);
index 04160a4..a15f582 100644 (file)
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
        __iomem u32 *bptr_vaddr;
        struct device_node *np;
        int n = 0;
+       int ioremappable;
 
        WARN_ON (nr < 0 || nr >= NR_CPUS);
 
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
                return;
        }
 
+       /*
+        * A secondary core could be in a spinloop in the bootpage
+        * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+        * The bootpage and highmem can be accessed via ioremap(), but
+        * we need to directly access the spinloop if its in lowmem.
+        */
+       ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
        /* Map the spin table */
-       bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       if (ioremappable)
+               bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       else
+               bptr_vaddr = phys_to_virt(*cpu_rel_addr);
 
        local_irq_save(flags);
 
        out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
        out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
+       if (!ioremappable)
+               flush_dcache_range((ulong)bptr_vaddr,
+                               (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
        /* Wait a bit for the CPU to ack. */
        while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
                mdelay(1);
 
        local_irq_restore(flags);
 
-       iounmap(bptr_vaddr);
+       if (ioremappable)
+               iounmap(bptr_vaddr);
 
        pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
 }
index 1ee66db..f5f7919 100644 (file)
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
 {
        struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
-       BUG_ON(os_cppr->index != 0);
+       /*
+        * we only really want to set the priority when there's
+        * just one cppr value on the stack
+        */
+       WARN_ON(os_cppr->index != 0);
 
-       os_cppr->stack[os_cppr->index] = cppr;
+       os_cppr->stack[0] = cppr;
 
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
 
 void xics_teardown_cpu(void)
 {
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
        int cpu = smp_processor_id();
 
+       /*
+        * we have to reset the cppr index to 0 because we're
+        * not going to return from the IPI
+        */
+       os_cppr->index = 0;
        xics_set_cpu_priority(0);
 
        /* Clear any pending IPI request */
index 341aff2..cd128b0 100644 (file)
@@ -288,46 +288,30 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = HYPFS_MAGIC;
        sb->s_op = &hypfs_s_ops;
-       if (hypfs_parse_options(data, sb)) {
-               rc = -EINVAL;
-               goto err_alloc;
-       }
+       if (hypfs_parse_options(data, sb))
+               return -EINVAL;
        root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
-       if (!root_inode) {
-               rc = -ENOMEM;
-               goto err_alloc;
-       }
+       if (!root_inode)
+               return -ENOMEM;
        root_inode->i_op = &simple_dir_inode_operations;
        root_inode->i_fop = &simple_dir_operations;
-       root_dentry = d_alloc_root(root_inode);
+       sb->s_root = root_dentry = d_alloc_root(root_inode);
        if (!root_dentry) {
                iput(root_inode);
-               rc = -ENOMEM;
-               goto err_alloc;
+               return -ENOMEM;
        }
        if (MACHINE_IS_VM)
                rc = hypfs_vm_create_files(sb, root_dentry);
        else
                rc = hypfs_diag_create_files(sb, root_dentry);
        if (rc)
-               goto err_tree;
+               return rc;
        sbi->update_file = hypfs_create_update_file(sb, root_dentry);
-       if (IS_ERR(sbi->update_file)) {
-               rc = PTR_ERR(sbi->update_file);
-               goto err_tree;
-       }
+       if (IS_ERR(sbi->update_file))
+               return PTR_ERR(sbi->update_file);
        hypfs_update_update(sb);
-       sb->s_root = root_dentry;
        pr_info("Hypervisor filesystem mounted\n");
        return 0;
-
-err_tree:
-       hypfs_delete_tree(root_dentry);
-       d_genocide(root_dentry);
-       dput(root_dentry);
-err_alloc:
-       kfree(sbi);
-       return rc;
 }
 
 static int hypfs_get_super(struct file_system_type *fst, int flags,
@@ -340,12 +324,12 @@ static void hypfs_kill_super(struct super_block *sb)
 {
        struct hypfs_sb_info *sb_info = sb->s_fs_info;
 
-       if (sb->s_root) {
+       if (sb->s_root)
                hypfs_delete_tree(sb->s_root);
+       if (sb_info->update_file)
                hypfs_remove(sb_info->update_file);
-               kfree(sb->s_fs_info);
-               sb->s_fs_info = NULL;
-       }
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
        kill_litter_super(sb);
 }
 
index f2ef4b6..c25dfac 100644 (file)
@@ -293,12 +293,12 @@ struct _lowcore
        __u64   clock_comparator;               /* 0x02d0 */
        __u32   machine_flags;                  /* 0x02d8 */
        __u32   ftrace_func;                    /* 0x02dc */
-       __u8    pad_0x02f0[0x0300-0x02f0];      /* 0x02f0 */
+       __u8    pad_0x02e0[0x0300-0x02e0];      /* 0x02e0 */
 
        /* Interrupt response block */
        __u8    irb[64];                        /* 0x0300 */
 
-       __u8    pad_0x0400[0x0e00-0x0400];      /* 0x0400 */
+       __u8    pad_0x0340[0x0e00-0x0340];      /* 0x0340 */
 
        /*
         * 0xe00 contains the address of the IPL Parameter Information
index 3f7e2a2..f6a389c 100644 (file)
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
         mov    #1, r5
 
 call_handle_tlbmiss:
-       setup_frame_reg
        mov.l   1f, r0
        mov     r5, r8
        mov.l   @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
         mov.l  @k2, k2         ! read out vector and keep in k2
 
 handle_exception_special:
+       setup_frame_reg
+
        ! Setup return address and jump to exception handler
        mov.l   7f, r9          ! fetch return address
        stc     r2_bank, r0     ! k2 (vector)
index 88d28ec..e511680 100644 (file)
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
        mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  *     dwarf_unwind_stack - unwind the stack
  *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
        addr = frame->cfa + reg->addr;
        frame->return_addr = __raw_readl(addr);
 
+       /*
+        * Ah, the joys of unwinding through interrupts.
+        *
+        * Interrupts are tricky - the DWARF info needs to be _really_
+        * accurate and unfortunately I'm seeing a lot of bogus DWARF
+        * info. For example, I've seen interrupts occur in epilogues
+        * just after the frame pointer (r14) had been restored. The
+        * problem was that the DWARF info claimed that the CFA could be
+        * reached by using the value of the frame pointer before it was
+        * restored.
+        *
+        * So until the compiler can be trusted to produce reliable
+        * DWARF info when it really matters, let's stop unwinding once
+        * we've calculated the function that was interrupted.
+        */
+       if (prev && prev->pc == (unsigned long)ret_from_irq)
+               frame->return_addr = 0;
+
        return frame;
 
 bail:
index f0abd58..2b15ae6 100644 (file)
@@ -70,8 +70,14 @@ ret_from_exception:
        CFI_STARTPROC simple
        CFI_DEF_CFA r14, 0
        CFI_REL_OFFSET 17, 64
-       CFI_REL_OFFSET 15, 0
+       CFI_REL_OFFSET 15, 60
        CFI_REL_OFFSET 14, 56
+       CFI_REL_OFFSET 13, 52
+       CFI_REL_OFFSET 12, 48
+       CFI_REL_OFFSET 11, 44
+       CFI_REL_OFFSET 10, 40
+       CFI_REL_OFFSET 9, 36
+       CFI_REL_OFFSET 8, 32
        preempt_stop()
 ENTRY(ret_from_irq)
        !
index 873ebdc..b063eb8 100644 (file)
@@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child)
        struct pt_regs *regs = child->thread.uregs;
 
        regs->sr |= SR_SSTEP;   /* auto-resetting upon exception */
+
+       set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 void user_disable_single_step(struct task_struct *child)
@@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child)
        struct pt_regs *regs = child->thread.uregs;
 
        regs->sr &= ~SR_SSTEP;
+
+       clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 static int genregs_get(struct task_struct *target,
@@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
 
 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
+       int step;
+
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
                                   regs->regs[9]);
@@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[9]);
 
-       if (test_thread_flag(TIF_SYSCALL_TRACE))
-               tracehook_report_syscall_exit(regs, 0);
+       step = test_thread_flag(TIF_SINGLESTEP);
+       if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, step);
 }
 
 /* Called with interrupts disabled */
index ce76dbd..580e97d 100644 (file)
@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
                         * clear the TS_RESTORE_SIGMASK flag.
                         */
                        current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-                       tracehook_signal_handler(signr, &info, &ka, regs, 0);
+
+                       tracehook_signal_handler(signr, &info, &ka, regs,
+                                       test_thread_flag(TIF_SINGLESTEP));
                        return 1;
                }
        }
index 55db5ec..39327d6 100644 (file)
@@ -53,8 +53,8 @@ struct stat {
        ino_t           st_ino;
        mode_t          st_mode;
        short           st_nlink;
-       uid_t           st_uid;
-       gid_t           st_gid;
+       uid16_t         st_uid;
+       gid16_t         st_gid;
        unsigned short  st_rdev;
        off_t           st_size;
        time_t          st_atime;
index 4248d96..5247283 100644 (file)
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
 {
        unsigned long base = (unsigned long) tp;
 
+       /* Stack pointer must be 16-byte aligned.  */
+       if (sp & (16UL - 1))
+               return false;
+
        if (sp >= (base + sizeof(struct thread_info)) &&
            sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
                return true;
index 4c26eb5..53a58b3 100644 (file)
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
 
 static int of_bus_ambapp_match(struct device_node *np)
 {
-       return !strcmp(np->name, "ambapp");
+       return !strcmp(np->type, "ambapp");
 }
 
 static void of_bus_ambapp_count_cells(struct device_node *child,
index 539e83f..592b03d 100644 (file)
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct pci_bus *bus, int devfn)
 {
        struct dev_archdata *sd;
+       struct pci_slot *slot;
        struct of_device *op;
        struct pci_dev *dev;
        const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        dev->dev.bus = &pci_bus_type;
        dev->devfn = devfn;
        dev->multifunction = 0;         /* maybe a lie? */
+       set_pcie_port_type(dev);
+
+       list_for_each_entry(slot, &dev->bus->slots, list)
+               if (PCI_SLOT(dev->devfn) == slot->number)
+                       dev->slot = slot;
 
        dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
        dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 
        dev->current_state = 4;         /* unknown power state */
        dev->error_state = pci_channel_io_normal;
+       dev->dma_mask = 0xffffffff;
 
        if (!strcmp(node->name, "pci")) {
                /* a PCI-PCI bridge */
index 2830b41..c49865b 100644 (file)
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                         * Set some valid stack frames to give to the child.
                         */
                        childstack = (struct sparc_stackf __user *)
-                               (sp & ~0x7UL);
+                               (sp & ~0xfUL);
                        parentstack = (struct sparc_stackf __user *)
                                regs->u_regs[UREG_FP];
 
index c3f1cce..cb70476 100644 (file)
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
 
-       /* Now 8-byte align the stack as this is mandatory in the
-        * Sparc ABI due to how register windows work.  This hides
-        * the restriction from thread libraries etc.  -DaveM
+       /* Now align the stack as this is mandatory in the Sparc ABI
+        * due to how register windows work.  This hides the
+        * restriction from thread libraries etc.
         */
-       csp &= ~7UL;
+       csp &= ~15UL;
 
        distance = fp - psp;
        rval = (csp - distance);
index ba5b09a..ea22cd3 100644 (file)
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
 };
 
 /* Align macros */
-#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 7) & (~7)))
-#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 15) & (~15)))
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
+       sp -= framesize;
+
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~7UL;
+       sp &= ~15UL;
 
-       return (void __user *)(sp - framesize);
+       return (void __user *) sp;
 }
 
 static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
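
The reordering above (subtract the frame size first, then mask) is what actually guarantees a 16-byte-aligned frame; masking the top of stack and only then subtracting an arbitrary size can leave the frame misaligned again. A userspace sketch with hypothetical numbers, just to show the difference:

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0xffffd8f8UL;	/* hypothetical user stack pointer */
	unsigned long framesize = 0x228;	/* hypothetical signal frame size */

	unsigned long new_order = (sp - framesize) & ~15UL;	/* frame base ends up aligned */
	unsigned long old_order = (sp & ~15UL) - framesize;	/* may end up misaligned */

	printf("subtract-then-mask: 0x%lx (mod 16 = %lu)\n", new_order, new_order % 16);
	printf("mask-then-subtract: 0x%lx (mod 16 = %lu)\n", old_order, old_order % 16);
	return 0;
}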
index 7ce1a10..9882df9 100644 (file)
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
+       sp -= framesize;
+
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~7UL;
+       sp &= ~15UL;
 
-       return (void __user *)(sp - framesize);
+       return (void __user *) sp;
 }
 
 static inline int
index 647afbd..9fa48c3 100644 (file)
@@ -353,7 +353,7 @@ segv:
 /* Checks if the fp is valid */
 static int invalid_frame_pointer(void __user *fp, int fplen)
 {
-       if (((unsigned long) fp) & 7)
+       if (((unsigned long) fp) & 15)
                return 1;
        return 0;
 }
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
+       sp -= framesize;
+
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~7UL;
+       sp &= ~15UL;
 
-       return (void __user *)(sp - framesize);
+       return (void __user *) sp;
 }
 
 static inline void
index 8c91d9b..db15d12 100644 (file)
@@ -191,10 +191,12 @@ tsb_dtlb_load:
 
 tsb_itlb_load:
        /* Executable bit must be set.  */
-661:   andcc           %g5, _PAGE_EXEC_4U, %g0
-       .section        .sun4v_1insn_patch, "ax"
+661:   sethi           %hi(_PAGE_EXEC_4U), %g4
+       andcc           %g5, %g4, %g0
+       .section        .sun4v_2insn_patch, "ax"
        .word           661b
        andcc           %g5, _PAGE_EXEC_4V, %g0
+       nop
        .previous
 
        be,pn           %xcc, tsb_do_fault
index 3b3c366..de317d0 100644 (file)
@@ -140,7 +140,7 @@ void mconsole_proc(struct mc_request *req)
                goto out;
        }
 
-       err = may_open(&nd.path, MAY_READ, FMODE_READ);
+       err = may_open(&nd.path, MAY_READ, O_RDONLY);
        if (result) {
                mconsole_reply(req, "Failed to open file", 1, 0);
                path_put(&nd.path);
index 1994d3f..f2ad216 100644 (file)
@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t,
 }
 
 #define ELF_PLAT_INIT(_r, load_addr)                   \
-do {                                                   \
-       elf_common_init(&current->thread, _r, 0);       \
-       clear_thread_flag(TIF_IA32);                    \
-} while (0)
+       elf_common_init(&current->thread, _r, 0)
 
 #define        COMPAT_ELF_PLAT_INIT(regs, load_addr)           \
        elf_common_init(&current->thread, regs, __USER_DS)
index fc801ba..b753ea5 100644 (file)
@@ -450,6 +450,8 @@ struct thread_struct {
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long           debugreg6;
+       /* Keep track of the exact dr7 value set by the user */
+       unsigned long           ptrace_dr7;
        /* Fault info: */
        unsigned long           cr2;
        unsigned long           trap_no;
index ecb544e..e04740f 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/irqflags.h>
 
 /* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
 # define AT_VECTOR_SIZE_ARCH 2
-#else
+#else /* else it's non-compat x86-64 */
 # define AT_VECTOR_SIZE_ARCH 1
 #endif
 
index 036d28a..af1c583 100644 (file)
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
                if (!error) {
                        acpi_lapic = 1;
 
-#ifdef CONFIG_X86_BIGSMP
-                       generic_bigsmp_probe();
-#endif
                        /*
                         * Parse MADT IO-APIC entries
                         */
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
                                acpi_ioapic = 1;
 
                                smp_found_config = 1;
-                               if (apic->setup_apic_routing)
-                                       apic->setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
@@ -1349,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
         },
        {
         .callback = force_acpi_ht,
-        .ident = "ASUS P2B-DS",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
         .ident = "ASUS CUR-DLS",
         .matches = {
                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
index 3987e44..dfca210 100644 (file)
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
 #endif
 
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
 
        verify_local_APIC();
        connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
        if (apicid > max_physical_apicid)
                max_physical_apicid = apicid;
 
-#ifdef CONFIG_X86_32
-       if (num_processors > 8) {
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_INTEL:
-                       if (!APIC_XAPIC(version)) {
-                               def_to_bigsmp = 0;
-                               break;
-                       }
-                       /* If P4 and above fall through */
-               case X86_VENDOR_AMD:
-                       def_to_bigsmp = 1;
-               }
-       }
-#endif
-
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
index 1a6559f..99d2fe0 100644 (file)
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
 }
 late_initcall(print_ipi_mode);
 
-void default_setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
+{
+       int version = apic_version[boot_cpu_physical_apicid];
+
+       if (num_possible_cpus() > 8) {
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       if (!APIC_XAPIC(version)) {
+                               def_to_bigsmp = 0;
+                               break;
+                       }
+                       /* If P4 and above fall through */
+               case X86_VENDOR_AMD:
+                       def_to_bigsmp = 1;
+               }
+       }
+
+#ifdef CONFIG_X86_BIGSMP
+       generic_bigsmp_probe();
+#endif
+
+       if (apic->setup_apic_routing)
+               apic->setup_apic_routing();
+}
+
+static void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
        printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
        .init_apic_ldr                  = default_init_apic_ldr,
 
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
-       .setup_apic_routing             = default_setup_apic_routing,
+       .setup_apic_routing             = setup_apic_flat_routing,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = default_apicid_to_node,
        .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
index 450fe20..83e9be4 100644 (file)
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
        }
 #endif
 
-       if (apic == &apic_flat && num_processors > 8)
+       if (apic == &apic_flat && num_possible_cpus() > 8)
                        apic = &apic_physflat;
 
        printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
index f125e5c..6e44519 100644 (file)
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
        kfree(data->powernow_table);
        kfree(data);
+       per_cpu(powernow_data, pol->cpu) = NULL;
 
        return 0;
 }
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
        int err;
 
        if (!data)
-               return -EINVAL;
+               return 0;
 
        smp_call_function_single(cpu, query_values_on_cpu, &err, true);
        if (err)
index 05d5fec..bb6006e 100644 (file)
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
 
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-       /*
-        * For kernel-addresses, either the address or symbol name can be
-        * specified.
-        */
-       if (info->name)
-               info->address = (unsigned long)
-                               kallsyms_lookup_name(info->name);
-       if (info->address)
-               return 0;
-
-       return -EINVAL;
-}
-
 int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
 {
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
                return ret;
        }
 
-       ret = arch_store_info(bp);
-
-       if (ret < 0)
-               return ret;
+       /*
+        * For kernel-addresses, either the address or symbol name can be
+        * specified.
+        */
+       if (info->name)
+               info->address = (unsigned long)
+                               kallsyms_lookup_name(info->name);
        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
index 40b54ce..a2c1edd 100644 (file)
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                x86_init.mpparse.mpc_record(1);
        }
 
-#ifdef CONFIG_X86_BIGSMP
-       generic_bigsmp_probe();
-#endif
-
-       if (apic->setup_apic_routing)
-               apic->setup_apic_routing();
-
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
index 41a26a8..126f0b4 100644 (file)
@@ -527,6 +527,7 @@ void set_personality_ia32(void)
 
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
+       current->personality |= force_personality32;
 
        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
index 017d937..0c1033d 100644 (file)
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
        } else if (n == 6) {
                val = thread->debugreg6;
         } else if (n == 7) {
-               val = ptrace_get_dr7(thread->ptrace_bps);
+               val = thread->ptrace_dr7;
        }
        return val;
 }
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
                        return rc;
        }
        /* All that's left is DR7 */
-       if (n == 7)
+       if (n == 7) {
                rc = ptrace_write_dr7(tsk, val);
+               if (!rc)
+                       thread->ptrace_dr7 = val;
+       }
 
 ret_path:
        return rc;
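
The new ptrace_dr7 field simply remembers the last value the tracer successfully wrote, so a later debug-register read returns it verbatim instead of a value reconstructed from the installed breakpoints. A trivial userspace sketch of that bookkeeping (struct and function names are made up):

#include <stdio.h>

struct thread_demo {
	unsigned long ptrace_dr7;	/* cached, user-visible DR7 */
};

/* Write path: cache val only if the real DR7 update succeeded. */
static int write_dr7_demo(struct thread_demo *t, unsigned long val, int rc)
{
	if (!rc)
		t->ptrace_dr7 = val;
	return rc;
}

int main(void)
{
	struct thread_demo t = { 0 };

	write_dr7_demo(&t, 0x401UL, 0);		/* pretend the write succeeded */
	printf("DR7 reads back as 0x%lx\n", t.ptrace_dr7);
	return 0;
}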
index 678d0b8..b4e870c 100644 (file)
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_cpu_sibling_map(0);
 
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
index 296aba4..15578f1 100644 (file)
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
                return -EOPNOTSUPP;
 
        addr &= KVM_PIT_CHANNEL_MASK;
+       if (addr == 3)
+               return 0;
+
        s = &pit_state->channels[addr];
 
        mutex_lock(&pit_state->lock);
index 1ddcad4..a1e1bc9 100644 (file)
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
        static int version;
        struct pvclock_wall_clock wc;
-       struct timespec now, sys, boot;
+       struct timespec boot;
 
        if (!wall_clock)
                return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
-       now = current_kernel_time();
-       ktime_get_ts(&sys);
-       boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+       getboottime(&boot);
 
        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
+       monotonic_to_bootbased(&ts);
        local_irq_restore(flags);
 
        /* With all the info we got, fill in the values */
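
The removed lines derived the boot time by subtracting the monotonic clock from the wall clock; getboottime() and monotonic_to_bootbased() do that centrally and, presumably, also fold in time spent suspended, which the open-coded difference did not. The same subtraction done from userspace, only to show the arithmetic (seconds only, deliberately rough):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec now, sys;

	clock_gettime(CLOCK_REALTIME, &now);	/* wall clock */
	clock_gettime(CLOCK_MONOTONIC, &sys);	/* time since boot, excluding suspend */

	/* Same idea as the removed lines: boot ~= now - sys. */
	long long boot_sec = now.tv_sec - sys.tv_sec;

	printf("approx. wall-clock boot time: %lld\n", boot_sec);
	return 0;
}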
index 71da1bc..738e659 100644 (file)
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 #else
        /*
         * With get_user_pages_fast, we walk down the pagetables without taking
-        * any locks.  For this we would like to load the pointers atoimcally,
+        * any locks.  For this we would like to load the pointers atomically,
         * but that is not possible (without expensive cmpxchg8b) on PAE.  What
         * we do have is the guarantee that a pte will only either go from not
         * present to present, or present to not present or both -- it will not
index 718897e..d1a9a0a 100644 (file)
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-       return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+       return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
         * and to it is freed is accounted as io that is in progress at
         * the driver side.
         */
-       if (blk_account_rq(rq)) {
+       if (blk_account_rq(rq))
                q->in_flight[rq_is_sync(rq)]++;
-               /*
-                * Mark this device as supporting hardware queuing, if
-                * we have more IOs in flight than 4.
-                */
-               if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-                       set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-       }
 }
 
 /**
index 17b768d..023f4e6 100644 (file)
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT             (2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT         (HZ)
-
 #define CFQ_SLICE_SCALE                (5)
 #define CFQ_HW_QUEUE_MIN       (5)
 #define CFQ_SERVICE_SHIFT       12
 
+#define CFQQ_SEEK_THR          8 * 1024
+#define CFQQ_SEEKY(cfqq)       ((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)             \
        ((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)            (struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
        u64 seek_total;
        sector_t seek_mean;
        sector_t last_request_pos;
-       unsigned long seeky_start;
 
        pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
+       CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be splitted */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfq_clear_cfqq_wait_busy(cfqq);
 
        /*
+        * If this cfqq is shared between multiple processes, check to
+        * make sure that those processes are still issuing I/Os within
+        * the mean seek distance.  If not, it may be time to break the
+        * queues apart again.
+        */
+       if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+               cfq_mark_cfqq_split_coop(cfqq);
+
+       /*
         * store what was left of this slice, if the queue idled/timed out
         */
        if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR          8 * 1024
-#define CFQQ_SEEKY(cfqq)       ((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                               struct request *rq, bool for_preempt)
 {
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        total = cfqq->seek_total + (cfqq->seek_samples/2);
        do_div(total, cfqq->seek_samples);
        cfqq->seek_mean = (sector_t)total;
-
-       /*
-        * If this cfqq is shared between multiple processes, check to
-        * make sure that those processes are still issuing I/Os within
-        * the mean seek distance.  If not, it may be time to break the
-        * queues apart again.
-        */
-       if (cfq_cfqq_coop(cfqq)) {
-               if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-                       cfqq->seeky_start = jiffies;
-               else if (!CFQQ_SEEKY(cfqq))
-                       cfqq->seeky_start = 0;
-       }
 }
 
 /*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
        return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-       if (cfqq->seeky_start &&
-           time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-               return 1;
-       return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
        if (cfqq_process_refs(cfqq) == 1) {
-               cfqq->seeky_start = 0;
                cfqq->pid = current->pid;
                cfq_clear_cfqq_coop(cfqq);
+               cfq_clear_cfqq_split_coop(cfqq);
                return cfqq;
        }
 
@@ -3510,7 +3493,7 @@ new_queue:
                /*
                 * If the queue was seeky for too long, break it apart.
                 */
-               if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+               if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
                        cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
                        cfqq = split_cfqq(cic, cfqq);
                        if (!cfqq)
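
With CFQQ_COOP_TOUT gone, a shared queue is flagged at slice expiry whenever it is both coop and currently seeky, and it is actually broken apart the next time a request looks the queue up. A stripped-down userspace sketch of that decision; the field names mirror the patch, but the struct itself is made up:

#include <stdio.h>

#define SEEK_THR	(8 * 1024)

struct cfqq_demo {
	unsigned long seek_mean;
	int coop;		/* queue is shared between processes */
	int split_coop;		/* marked for break-up at slice expiry */
};

/* Slice expiry: remember that a shared queue has turned seeky. */
static void slice_expired(struct cfqq_demo *q)
{
	if (q->coop && q->seek_mean > SEEK_THR)
		q->split_coop = 1;
}

int main(void)
{
	struct cfqq_demo q = { .seek_mean = 64 * 1024, .coop = 1 };

	slice_expired(&q);
	printf("split shared queue: %s\n", q.split_coop ? "yes" : "no");
	return 0;
}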
index bbc2c13..b2586f5 100644 (file)
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
        struct platform_device *dd;
 
        id = dock_station_count;
+       memset(&ds, 0, sizeof(ds));
        dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
        if (IS_ERR(dd))
                return PTR_ERR(dd);
index 7c0441f..cc978a8 100644 (file)
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
+       { set_max_cstate, "Pavilion zv5000", {
+         DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+         DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+        (void *)1},
+       { set_max_cstate, "Asus L8400B", {
+         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+         DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+        (void *)1},
        {},
 };
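
The two new entries above extend a DMI quirk table: an array of match rules, each naming a callback and an argument to apply when the firmware identification strings match. A simplified userspace sketch of that table-driven pattern (the field names are illustrative; the real struct dmi_system_id carries more machinery):

    #include <stdio.h>
    #include <string.h>

    struct quirk {
            int (*apply)(long arg);
            const char *ident;
            const char *vendor;             /* DMI_SYS_VENDOR match */
            const char *product;            /* DMI_PRODUCT_NAME match */
            long arg;
    };

    static int set_max_cstate(long arg)
    {
            printf("limiting C-states to C%ld\n", arg);
            return 0;
    }

    static const struct quirk quirks[] = {
            { set_max_cstate, "Pavilion zv5000",
              "Hewlett-Packard", "Pavilion zv5000 (DS502A#ABA)", 1 },
            { set_max_cstate, "Asus L8400B",
              "ASUSTeK Computer Inc.", "L8400B series Notebook PC", 1 },
            { 0 },                          /* terminator, as in the kernel table */
    };

    static void scan_quirks(const char *vendor, const char *product)
    {
            const struct quirk *q;

            for (q = quirks; q->apply; q++)
                    if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                            q->apply(q->arg);
    }

    int main(void)
    {
            scan_quirks("ASUSTeK Computer Inc.", "L8400B series Notebook PC");
            return 0;
    }
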
 
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                return(acpi_idle_enter_c1(dev, state));
 
        local_irq_disable();
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we test
-        * NEED_RESCHED:
-        */
-       smp_mb();
+       if (cx->entry_method != ACPI_CSTATE_FFH) {
+               current_thread_info()->status &= ~TS_POLLING;
+               /*
+                * TS_POLLING-cleared state must be visible before we test
+                * NEED_RESCHED:
+                */
+               smp_mb();
+       }
 
        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        }
 
        local_irq_disable();
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we test
-        * NEED_RESCHED:
-        */
-       smp_mb();
+       if (cx->entry_method != ACPI_CSTATE_FFH) {
+               current_thread_info()->status &= ~TS_POLLING;
+               /*
+                * TS_POLLING-cleared state must be visible before we test
+                * NEED_RESCHED:
+                */
+               smp_mb();
+       }
 
        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
index 7247819..e306ba9 100644 (file)
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
        return status;
 }
 
+static int early_pdc_done;
+
 void acpi_processor_set_pdc(acpi_handle handle)
 {
        struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
        if (arch_has_acpi_pdc() == false)
                return;
 
+       if (early_pdc_done)
+               return;
+
        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
        return 0;
 }
 
+static int param_early_pdc_optin(char *s)
+{
+       early_pdc_optin = 1;
+       return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
        {
        set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
+
+       early_pdc_done = 1;
 }
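
The early_pdc_done flag introduced above makes _PDC evaluation idempotent: once the early namespace walk has run, the later per-processor calls become no-ops instead of evaluating _PDC a second time. A tiny run-once sketch with illustrative names:

    #include <stdio.h>

    static int early_done;

    static void set_pdc(int cpu)
    {
            if (early_done)                 /* already handled by the early walk */
                    return;
            printf("evaluating _PDC for cpu %d\n", cpu);
    }

    static void early_set_pdc(void)
    {
            int cpu;

            for (cpu = 0; cpu < 4; cpu++)
                    set_pdc(cpu);
            early_done = 1;                 /* later duplicate calls are skipped */
    }

    int main(void)
    {
            early_set_pdc();
            set_pdc(0);                     /* prints nothing: already done */
            return 0;
    }
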
index 2cabadc..a959f6a 100644 (file)
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
        if (result)
                goto update_bios;
 
-       return 0;
+       /* We need to call _PPC once when cpufreq starts */
+       if (ignore_ppc != 1)
+               result = acpi_processor_get_platform_limit(pr);
+
+       return result;
 
        /*
         * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
index ff9f622..3e00967 100644 (file)
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 
        if (child)
                *child = device;
-       return 0;
+
+       if (device)
+               return 0;
+       else
+               return -ENODEV;
 }
 
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * Scan a given ACPI tree (typically one that was just hot-plugged) and
+ * create and add, or start, the devices found there.
+ *
+ * If no devices were found, -ENODEV is returned.  This is not
+ * necessarily a real error; there may simply have been no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
 int
 acpi_bus_add(struct acpi_device **child,
             struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_add = 1;
 
-       acpi_bus_scan(handle, &ops, child);
-       return 0;
+       return acpi_bus_scan(handle, &ops, child);
 }
 EXPORT_SYMBOL(acpi_bus_add);
 
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
 {
        struct acpi_bus_ops ops;
 
+       if (!device)
+               return -EINVAL;
+
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_start = 1;
 
-       acpi_bus_scan(device->handle, &ops, NULL);
-       return 0;
+       return acpi_bus_scan(device->handle, &ops, NULL);
 }
 EXPORT_SYMBOL(acpi_bus_start);
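
With acpi_bus_add() and acpi_bus_start() now propagating the scan result, callers can tell "nothing suitable was found" (-ENODEV) apart from a real failure, as the new comment explains. A hedged caller-side sketch with generic names, not the ACPI API itself:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for a scan that walked the tree but found nothing to add. */
    static int bus_scan(void) { return -ENODEV; }

    int main(void)
    {
            int err = bus_scan();

            if (err == -ENODEV) {
                    /* Not a hard error: there was simply nothing to add. */
                    printf("no devices found, continuing\n");
            } else if (err) {
                    fprintf(stderr, "scan failed: %d\n", err);
                    return 1;
            }
            return 0;
    }
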
 
index f336bca..8a0ed28 100644 (file)
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
        unsigned long table_end;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
index 161746d..6e2c3b0 100644 (file)
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
        else
                pr_debug("class '%s' does not have a release() function, "
                         "be careful\n", class->name);
+
+       kfree(cp);
 }
 
 static struct sysfs_ops class_sysfs_ops = {
index 873e594..9291614 100644 (file)
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
        if (*pos > h->highest_lun)
                return 0;
 
+       if (drv == NULL) /* it's possible for h->drv[] to have holes. */
+               return 0;
+
        if (drv->heads == 0)
                return 0;
 
index f36defa..57d965b 100644 (file)
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 
 exit:
        sdio_release_host(card->func);
+       kfree(tmpbuf);
 
        return ret;
 }
index 34cf04e..fd50ead 100644 (file)
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
 
 static int __init agp_amd64_mod_init(void)
 {
+#ifndef MODULE
        if (gart_iommu_aperture)
                return agp_bridges_found ? 0 : -ENODEV;
-
+#endif
        return agp_amd64_init();
 }
 
 static void __exit agp_amd64_cleanup(void)
 {
+#ifndef MODULE
        if (gart_iommu_aperture)
                return;
+#endif
        if (aperture_resource)
                release_resource(aperture_resource);
        pci_unregister_driver(&agp_amd64_pci_driver);
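
The #ifndef MODULE guards above restrict the gart_iommu_aperture checks to built-in builds; a modular agp-amd64 always probes. A minimal illustration of the same preprocessor split, compiled once plainly and once with -DMODULE to see both behaviours (names are illustrative):

    #include <stdio.h>

    static int aperture_handled_early = 1;  /* pretend early GART setup ran */

    static int mod_init(void)
    {
    #ifndef MODULE
            /* Built-in: defer to the early aperture/GART code if it ran. */
            if (aperture_handled_early) {
                    printf("built-in: skipping probe\n");
                    return 0;
            }
    #endif
            printf("probing normally\n");
            return 0;
    }

    int main(void)
    {
            return mod_init();
    }
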
index ecba494..f584407 100644 (file)
 struct tpm_inf_dev {
        int iotype;
 
-       void __iomem *mem_base;         /* MMIO ioremap'd addr */
-       unsigned long map_base;         /* phys MMIO base */
-       unsigned long map_size;         /* MMIO region size */
-       unsigned int index_off;         /* index register offset */
+       void __iomem *mem_base; /* MMIO ioremap'd addr */
+       unsigned long map_base; /* phys MMIO base */
+       unsigned long map_size; /* MMIO region size */
+       unsigned int index_off; /* index register offset */
 
-       unsigned int data_regs;         /* Data registers */
+       unsigned int data_regs; /* Data registers */
        unsigned int data_size;
 
        unsigned int config_port;       /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
        .miscdev = {.fops = &inf_ops,},
 };
 
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
        /* Infineon TPMs */
        {"IFX0101", 0},
        {"IFX0102", 0},
        {"", 0}
 };
 
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
 
 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                                       const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
        if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
            !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
-               tpm_dev.iotype = TPM_INF_IO_PORT;
+               tpm_dev.iotype = TPM_INF_IO_PORT;
 
                tpm_dev.config_port = pnp_port_start(dev, 0);
                tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                        goto err_last;
                }
        } else if (pnp_mem_valid(dev, 0) &&
-                  !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+                  !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
-               tpm_dev.iotype = TPM_INF_IO_MEM;
+               tpm_dev.iotype = TPM_INF_IO_MEM;
 
                tpm_dev.map_base = pnp_mem_start(dev, 0);
                tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                         "product id 0x%02x%02x"
                         "%s\n",
                         tpm_dev.iotype == TPM_INF_IO_PORT ?
-                               tpm_dev.config_port :
-                               tpm_dev.map_base + tpm_dev.index_off,
+                        tpm_dev.config_port :
+                        tpm_dev.map_base + tpm_dev.index_off,
                         tpm_dev.iotype == TPM_INF_IO_PORT ?
-                               tpm_dev.data_regs :
-                               tpm_dev.map_base + tpm_dev.data_regs,
+                        tpm_dev.data_regs :
+                        tpm_dev.map_base + tpm_dev.data_regs,
                         version[0], version[1],
                         vendorid[0], vendorid[1],
                         productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
                        iounmap(tpm_dev.mem_base);
                        release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
                }
+               tpm_dev_vendor_release(chip);
                tpm_remove_hardware(chip->dev);
        }
 }
 
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+       struct tpm_chip *chip = pnp_get_drvdata(dev);
+       int rc;
+       if (chip) {
+               u8 savestate[] = {
+                       0, 193, /* TPM_TAG_RQU_COMMAND */
+                       0, 0, 0, 10,    /* blob length (in bytes) */
+                       0, 0, 0, 152    /* TPM_ORD_SaveState */
+               };
+               dev_info(&dev->dev, "saving TPM state\n");
+               rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+               if (rc < 0) {
+                       dev_err(&dev->dev, "error while saving TPM state\n");
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+       /* Re-configure TPM after suspending */
+       tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+       tpm_config_out(IOLIMH, TPM_INF_ADDR);
+       tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+       tpm_config_out(IOLIML, TPM_INF_ADDR);
+       tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+       /* activate register */
+       tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+       tpm_config_out(0x01, TPM_INF_DATA);
+       tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+       /* disable RESET, LP and IRQC */
+       tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+       return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
        .name = "tpm_inf_pnp",
-       .driver = {
-               .owner = THIS_MODULE,
-               .suspend = tpm_pm_suspend,
-               .resume = tpm_pm_resume,
-       },
-       .id_table = tpm_pnp_tbl,
+       .id_table = tpm_inf_pnp_tbl,
        .probe = tpm_inf_pnp_probe,
-       .remove = __devexit_p(tpm_inf_pnp_remove),
+       .suspend = tpm_inf_pnp_suspend,
+       .resume = tpm_inf_pnp_resume,
+       .remove = __devexit_p(tpm_inf_pnp_remove)
 };
 
 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");
index 27d20fa..b314a99 100644 (file)
@@ -21,7 +21,7 @@
 
 #define DRV_NAME "cs5535-clockevt"
 
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
 
index 4b34ade..bd444dc 100644 (file)
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                                (dbs_tuners_ins.up_threshold -
                                 dbs_tuners_ins.down_differential);
 
+               if (freq_next < policy->min)
+                       freq_next = policy->min;
+
                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
index b5f2ee0..64a9372 100644 (file)
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
        cohd_fin->pending_irqs--;
        cohc->completed = cohd_fin->desc.cookie;
 
-       BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
        if (cohc->nbr_active_done == 0)
                return;
 
index 6f51a0a..e7a3230 100644 (file)
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
+               free_percpu(chan->local);
        }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
index 8b90516..948d563 100644 (file)
@@ -467,7 +467,7 @@ err_srcs:
 
        if (iterations > 0)
                while (!kthread_should_stop()) {
-                       DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+                       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                        interruptible_sleep_on(&wait_dmatest_exit);
                }
 
index 5f7a500..5cc37af 100644 (file)
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
        if (is_ioat_active(status) || is_ioat_idle(status))
                ioat_suspend(chan);
        while (is_ioat_active(status) || is_ioat_idle(status)) {
-               if (end && time_after(jiffies, end)) {
+               if (tmo && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
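
The ioat fix above guards the timeout test on tmo rather than end: end, a computed deadline, is effectively always nonzero, so a caller passing tmo == 0 (meaning "no timeout") could still see -ETIMEDOUT. A userspace sketch of the corrected deadline logic, with time_after() reduced to a plain comparison (illustrative only):

    #include <stdio.h>

    /* Poll until done(), honouring an optional timeout; tmo == 0 means
     * "wait forever".  Hypothetical names, simplified time handling. */
    static int wait_for(int (*done)(void), unsigned long now, unsigned long tmo)
    {
            unsigned long end = now + tmo;

            while (!done()) {
                    if (tmo && now > end)   /* guard on tmo, not on end */
                            return -1;      /* timed out */
                    now++;                  /* stand-in for the passage of time */
            }
            return 0;
    }

    static int counter;
    static int done_after_three(void) { return ++counter > 3; }
    static int never_done(void)       { return 0; }

    int main(void)
    {
            printf("%d\n", wait_for(done_after_three, 0, 10));      /*  0 */
            printf("%d\n", wait_for(never_done, 0, 5));             /* -1 */
            return 0;
    }
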
index 9a5bc1a..e80bae1 100644 (file)
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:  buffer number to update.
  *             0 or 1 are the only valid values.
  * @phyaddr:   buffer physical address.
- * @return:    Returns 0 on success or negative error code on failure. This
- *              function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
-                                    int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+                                     int buffer_n, dma_addr_t phyaddr)
 {
        enum ipu_channel channel = ichan->dma_chan.chan_id;
        uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
        }
 
        spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-       return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
        unsigned int chan_id = ichan->dma_chan.chan_id;
        struct device *dev = &ichan->dma_chan.dev->device;
-       int ret;
 
        if (async_tx_test_ack(&desc->txd))
                return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
         * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
         * doing it again shouldn't hurt either.
         */
-       ret = ipu_update_channel_buffer(ichan, buf_idx,
-                                       sg_dma_address(sg));
-
-       if (ret < 0) {
-               dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-                       sg, chan_id, buf_idx);
-               return ret;
-       }
+       ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
        ipu_select_buffer(chan_id, buf_idx);
        dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
        if (likely(sgnew) &&
            ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-               callback = desc->txd.callback;
-               callback_param = desc->txd.callback_param;
+               callback = descnew->txd.callback;
+               callback_param = descnew->txd.callback_param;
                spin_unlock(&ichan->lock);
-               callback(callback_param);
+               if (callback)
+                       callback(callback_param);
                spin_lock(&ichan->lock);
        }
 
index 000dc67..3391e67 100644 (file)
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
  * the memory system completely. A command line option allows to force-enable
  * hardware ECC later in amd64_enable_ecc_error_reporting().
  */
-static const char *ecc_warning =
-       "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
-       " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
-       " Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+       "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+       " Either enable ECC checking or force module loading by setting "
+       "'ecc_enable_override'.\n"
+       " (Note that use of the override may cause unknown side effects.)\n";
 
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
        ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
        if (!ecc_enabled)
-               amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+               amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
                             "is currently disabled, set F3x%x[22] (%s).\n",
                             K8_NBCFG, pci_name(pvt->misc_f3_ctl));
        else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
        nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
        if (!nb_mce_en)
-               amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+               amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
                             "0x%08x[4] on node %d to enable.\n",
                             MSR_IA32_MCG_CTL, pvt->mc_node_id);
 
        if (!ecc_enabled || !nb_mce_en) {
                if (!ecc_enable_override) {
-                       amd64_printk(KERN_WARNING, "%s", ecc_warning);
+                       amd64_printk(KERN_NOTICE, "%s", ecc_msg);
                        return -ENODEV;
                }
                ecc_enable_override = 0;
index cf27402..ecd5928 100644 (file)
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
                end   <<= (24 - PAGE_SHIFT);
                end    |= (1 << (24 - PAGE_SHIFT)) - 1;
 
-               csrow->first_page = start >> PAGE_SHIFT;
-               csrow->last_page = end >> PAGE_SHIFT;
+               csrow->first_page = start;
+               csrow->last_page = end;
                csrow->nr_pages = end + 1 - start;
                csrow->grain = 8;
                csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 
        mpc85xx_init_csrows(mci);
 
-#ifdef CONFIG_EDAC_DEBUG
-       edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
        /* store the original error disable bits */
        orig_ddr_err_disable =
            in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
index cbaf420..2d3dc7d 100644 (file)
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 static struct kmem_cache *fwnet_packet_task_cache;
 
+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+       dev_kfree_skb_any(ptask->skb);
+       kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
 static int fwnet_send_packet(struct fwnet_packet_task *ptask);
 
 static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 {
-       struct fwnet_device *dev;
+       struct fwnet_device *dev = ptask->dev;
        unsigned long flags;
-
-       dev = ptask->dev;
+       bool free;
 
        spin_lock_irqsave(&dev->lock, flags);
-       list_del(&ptask->pt_link);
-       spin_unlock_irqrestore(&dev->lock, flags);
 
-       ptask->outstanding_pkts--; /* FIXME access inside lock */
+       ptask->outstanding_pkts--;
+
+       /* Check whether we or the networking TX soft-IRQ is the last user. */
+       free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+       if (ptask->outstanding_pkts == 0)
+               list_del(&ptask->pt_link);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
 
        if (ptask->outstanding_pkts > 0) {
                u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
                        ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
                }
                fwnet_send_packet(ptask);
-       } else {
-               dev_kfree_skb_any(ptask->skb);
-               kmem_cache_free(fwnet_packet_task_cache, ptask);
        }
+
+       if (free)
+               fwnet_free_ptask(ptask);
 }
 
 static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
        unsigned tx_len;
        struct rfc2734_header *bufhdr;
        unsigned long flags;
+       bool free;
 
        dev = ptask->dev;
        tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
                                generation, SCODE_100, 0ULL, ptask->skb->data,
                                tx_len + 8, fwnet_write_complete, ptask);
 
-               /* FIXME race? */
                spin_lock_irqsave(&dev->lock, flags);
-               list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+               /* If the AT tasklet already ran, we may be the last user. */
+               free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+               if (!free)
+                       list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
                spin_unlock_irqrestore(&dev->lock, flags);
 
-               return 0;
+               goto out;
        }
 
        fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
                        ptask->generation, ptask->speed, ptask->fifo_addr,
                        ptask->skb->data, tx_len, fwnet_write_complete, ptask);
 
-       /* FIXME race? */
        spin_lock_irqsave(&dev->lock, flags);
-       list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+               /* If the AT tasklet already ran, we may be the last user. */
+       free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+       if (!free)
+               list_add_tail(&ptask->pt_link, &dev->sent_list);
+
        spin_unlock_irqrestore(&dev->lock, flags);
 
        dev->netdev->trans_start = jiffies;
+ out:
+       if (free)
+               fwnet_free_ptask(ptask);
 
        return 0;
 }
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
        spin_unlock_irqrestore(&dev->lock, flags);
 
        ptask->max_payload = max_payload;
+       INIT_LIST_HEAD(&ptask->pt_link);
+
        fwnet_send_packet(ptask);
 
        return NETDEV_TX_OK;
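
The net.c changes above close a race on packet-task teardown: either the AT-completion path or the transmit path can turn out to be the last user, so each one decides under dev->lock whether it must free the task. A single-threaded sketch of that handshake, with the lock and the list membership reduced to plain flags (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    struct ptask {
            int outstanding;                /* fragments still in flight */
            int on_list;                    /* still linked on the device list? */
    };

    static void free_ptask(struct ptask *p)
    {
            printf("freeing ptask\n");
            free(p);
    }

    /* Completion side: runs once per transmitted fragment. */
    static void transmit_done(struct ptask *p)
    {
            int last;

            /* --- taken under dev->lock in the real driver --- */
            p->outstanding--;
            last = (p->outstanding == 0 && p->on_list);
            if (p->outstanding == 0)
                    p->on_list = 0;
            /* --- unlock --- */

            if (last)
                    free_ptask(p);
    }

    /* Submit side: links the task unless completion already finished it. */
    static void send_packet(struct ptask *p)
    {
            int last;

            /* --- taken under dev->lock --- */
            last = (p->outstanding == 0 && !p->on_list);
            if (!last)
                    p->on_list = 1;
            /* --- unlock --- */

            if (last)
                    free_ptask(p);
    }

    int main(void)
    {
            struct ptask *p = calloc(1, sizeof(*p));

            if (!p)
                    return 1;
            p->outstanding = 1;
            send_packet(p);                 /* links the task */
            transmit_done(p);               /* last user: frees it */
            return 0;
    }
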
index 2345d41..43ebf33 100644 (file)
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;
 
-       /*
-        * FIXME: Cycle lost behavior should be configurable: lose
-        * packet, retransmit or terminate..
-        */
-
        p = packet;
        payload_index = payload;
 
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
        if (!p->skip) {
                d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);
+               /*
+                * Link the skip address to this descriptor itself.  This causes
+                * a context to skip a cycle whenever lost cycles or FIFO
+                * overruns occur, without dropping the data.  The application
+                * should then decide whether this is an error condition or not.
+                * FIXME:  Make the context's cycle-lost behaviour configurable?
+                */
+               d[0].branch_address = cpu_to_le32(d_bus | z);
 
                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
index f665b05..ab6c973 100644 (file)
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
        return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+                           struct detailed_pixel_timing *pt)
+{
+       int i;
+       static const struct {
+               int w, h;
+       } cea_interlaced[] = {
+               { 1920, 1080 },
+               {  720,  480 },
+               { 1440,  480 },
+               { 2880,  480 },
+               {  720,  576 },
+               { 1440,  576 },
+               { 2880,  576 },
+       };
+       static const int n_sizes =
+               sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+       if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+               return;
+
+       for (i = 0; i < n_sizes; i++) {
+               if ((mode->hdisplay == cea_interlaced[i].w) &&
+                   (mode->vdisplay == cea_interlaced[i].h / 2)) {
+                       mode->vdisplay *= 2;
+                       mode->vsync_start *= 2;
+                       mode->vsync_end *= 2;
+                       mode->vtotal *= 2;
+                       mode->vtotal |= 1;
+               }
+       }
+
+       mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
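
drm_mode_do_interlace_quirk() above converts CEA modes that EDID encodes as field height back to frame height by doubling the vertical timings and forcing vtotal odd. A standalone sketch of that conversion for the 1080i case, with the mode structure trimmed to the fields actually touched:

    #include <stddef.h>
    #include <stdio.h>

    struct mode { int hdisplay, vdisplay, vsync_start, vsync_end, vtotal; };

    static const struct { int w, h; } cea_interlaced[] = {
            { 1920, 1080 }, {  720, 480 }, { 1440, 480 }, { 2880, 480 },
            {  720,  576 }, { 1440, 576 }, { 2880, 576 },
    };

    static void interlace_quirk(struct mode *m)
    {
            size_t i;

            for (i = 0; i < sizeof(cea_interlaced) / sizeof(cea_interlaced[0]); i++) {
                    if (m->hdisplay == cea_interlaced[i].w &&
                        m->vdisplay == cea_interlaced[i].h / 2) {
                            m->vdisplay    *= 2;    /* field -> frame height */
                            m->vsync_start *= 2;
                            m->vsync_end   *= 2;
                            m->vtotal      *= 2;
                            m->vtotal      |= 1;    /* interlace wants odd vtotal */
                    }
            }
    }

    int main(void)
    {
            struct mode m = { 1920, 540, 542, 547, 562 };   /* 1080i in fields */

            interlace_quirk(&m);
            printf("%dx%d vtotal %d\n", m.hdisplay, m.vdisplay, m.vtotal);
            return 0;
    }
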
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_set_name(mode);
 
-       if (pt->misc & DRM_EDID_PT_INTERLACED)
-               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       drm_mode_do_interlace_quirk(mode, pt);
 
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
index cdec329..2ac074c 100644 (file)
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                wasted += alignment - tmp;
                }
 
-               if (entry->size >= size + wasted) {
+               if (entry->size >= size + wasted &&
+                   (entry->start + wasted + size) <= end) {
                        if (!best_match)
                                return entry;
                        if (entry->size < best_size) {
index e660ac0..2307f98 100644 (file)
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
-               if (cliprects == NULL)
+               if (cliprects == NULL) {
+                       ret = -ENOMEM;
                        goto fail_batch_free;
+               }
 
                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
index ecac882..cf4cb3e 100644 (file)
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!dev || !dev_priv) {
-               DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-               DRM_ERROR("DRM not initialized, aborting suspend.\n");
-               return -ENODEV;
-       }
-
-       if (state.event == PM_EVENT_PRETHAW)
-               return 0;
-
        pci_save_state(dev->pdev);
 
        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               if (i915_gem_idle(dev))
+               int error = i915_gem_idle(dev);
+               if (error) {
                        dev_err(&dev->pdev->dev,
-                               "GEM idle failed, resume may fail\n");
+                               "GEM idle failed, resume might fail\n");
+                       return error;
+               }
                drm_irq_uninstall(dev);
        }
 
@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
        intel_opregion_free(dev, 1);
 
+       /* Modeset on resume, not lid events */
+       dev_priv->modeset_on_lid = 0;
+
+       return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+       int error;
+
+       if (!dev || !dev->dev_private) {
+               DRM_ERROR("dev: %p\n", dev);
+               DRM_ERROR("DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       if (state.event == PM_EVENT_PRETHAW)
+               return 0;
+
+       error = i915_drm_freeze(dev);
+       if (error)
+               return error;
+
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
 
-       /* Modeset on resume, not lid events */
-       dev_priv->modeset_on_lid = 0;
-
        return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
-
-       if (pci_enable_device(dev->pdev))
-               return -1;
-       pci_set_master(dev->pdev);
+       int error = 0;
 
        i915_restore_state(dev);
 
@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;
 
-               ret = i915_gem_init_ringbuffer(dev);
-               if (ret != 0)
-                       ret = -1;
+               error = i915_gem_init_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
 
                drm_irq_install(dev);
-       }
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
                /* Resume the modeset for every activated CRTC */
                drm_helper_resume_force_mode(dev);
        }
 
        dev_priv->modeset_on_lid = 0;
 
-       return ret;
+       return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+       if (pci_enable_device(dev->pdev))
+               return -EIO;
+
+       pci_set_master(dev->pdev);
+
+       return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
        drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       int error;
 
-       return i915_suspend(dev, state);
-}
+       if (!drm_dev || !drm_dev->dev_private) {
+               dev_err(dev, "DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
+       error = i915_drm_freeze(drm_dev);
+       if (error)
+               return error;
 
-       return i915_resume(dev);
-}
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-       return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+       return 0;
 }
 
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-       return i915_pci_resume(to_pci_dev(dev));
-}
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-static int
-i915_pm_freeze(struct device *dev)
-{
-       return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+       return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-       /* thaw during hibernate, do nothing! */
-       return 0;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       if (!drm_dev || !drm_dev->dev_private) {
+               dev_err(dev, "DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-       return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-       return i915_pci_resume(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
      .freeze = i915_pm_freeze,
      .thaw = i915_pm_thaw,
      .poweroff = i915_pm_poweroff,
-     .restore = i915_pm_restore,
+     .restore = i915_pm_resume,
 };
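
The i915 rework above pulls the shared suspend/resume work into i915_drm_freeze() and i915_drm_thaw(), so the individual dev_pm_ops callbacks become thin wrappers that differ only in whether they also power the PCI device down or up. A compact sketch of that structure, with the callbacks reduced to plain functions and illustrative names:

    #include <stdio.h>

    /* Common state save, shared by suspend, freeze and poweroff. */
    static int drm_freeze(void) { printf("idle GPU, save state\n"); return 0; }
    /* Common state restore, shared by resume and thaw. */
    static int drm_thaw(void)   { printf("restore state, re-init rings\n"); return 0; }

    static int pm_suspend(void)
    {
            int err = drm_freeze();

            if (err)
                    return err;
            printf("put PCI device into D3hot\n");  /* suspend-only step */
            return 0;
    }

    static int pm_freeze(void)  { return drm_freeze(); }    /* hibernate: no D3 */
    static int pm_thaw(void)    { return drm_thaw(); }

    static int pm_resume(void)
    {
            printf("re-enable PCI device\n");       /* resume-only step */
            return drm_thaw();
    }

    int main(void)
    {
            return pm_suspend() || pm_resume() || pm_freeze() || pm_thaw();
    }
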
 
 static struct vm_operations_struct i915_gem_vm_ops = {
index aaf934d..b99b6a8 100644 (file)
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
                struct list_head flushing_list;
 
                /**
+                * List of objects currently pending a GPU write flush.
+                *
+                * All elements on this list will belong to either the
+                * active_list or flushing_list, last_rendering_seqno can
+                * be used to differentiate between the two elements.
+                */
+               struct list_head gpu_write_list;
+
+               /**
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
                 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
        /** This object's place on the active/flushing/inactive lists */
        struct list_head list;
+       /** This object's place on GPU write list */
+       struct list_head gpu_write_list;
 
        /** This object's place on the fenced object LRU */
        struct list_head fence_list;
index b4c8c02..ec8a0d7 100644 (file)
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+       BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
        obj_priv->last_rendering_seqno = 0;
        if (obj_priv->active) {
                obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                struct drm_i915_gem_object *obj_priv, *next;
 
                list_for_each_entry_safe(obj_priv, next,
-                                        &dev_priv->mm.flushing_list, list) {
+                                        &dev_priv->mm.gpu_write_list,
+                                        gpu_write_list) {
                        struct drm_gem_object *obj = obj_priv->obj;
 
                        if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                                uint32_t old_write_domain = obj->write_domain;
 
                                obj->write_domain = 0;
+                               list_del_init(&obj_priv->gpu_write_list);
                                i915_gem_object_move_to_active(obj, seqno);
 
                                trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t seqno;
        int ret;
+       uint32_t seqno;
        bool lists_empty;
 
        spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
        if (ret)
                return ret;
 
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
        ret = i915_gem_evict_from_inactive_list(dev);
        if (ret)
                return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
        seqno = i915_add_request(dev, NULL, obj->write_domain);
-       obj->write_domain = 0;
+       BUG_ON(obj->write_domain);
        i915_gem_object_move_to_active(obj, seqno);
 
        trace_i915_gem_object_change_domain(obj,
@@ -3682,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->num_cliprects != 0) {
                cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
                                    GFP_KERNEL);
-               if (cliprects == NULL)
+               if (cliprects == NULL) {
+                       ret = -ENOMEM;
                        goto pre_mutex_err;
+               }
 
                ret = copy_from_user(cliprects,
                                     (struct drm_clip_rect __user *)
@@ -3850,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
-               if (dev->flush_domains)
+               if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
                        (void)i915_add_request(dev, file_priv,
                                               dev->flush_domains);
        }
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
+               struct drm_i915_gem_object *obj_priv = obj->driver_private;
                uint32_t old_write_domain = obj->write_domain;
 
                obj->write_domain = obj->pending_write_domain;
+               if (obj->write_domain)
+                       list_move_tail(&obj_priv->gpu_write_list,
+                                      &dev_priv->mm.gpu_write_list);
+               else
+                       list_del_init(&obj_priv->gpu_write_list);
+
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
@@ -4370,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        obj_priv->obj = obj;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+       INIT_LIST_HEAD(&obj_priv->gpu_write_list);
        INIT_LIST_HEAD(&obj_priv->fence_list);
        obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4821,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
        spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+       INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
index 50ddf4a..a17d6bd 100644 (file)
@@ -309,21 +309,21 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        if (de_iir & DE_GSE)
                ironlake_opregion_gse_intr(dev);
 
-       if (de_iir & DE_PLANEA_FLIP_DONE)
+       if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
+               intel_finish_page_flip(dev, 0);
+       }
 
-       if (de_iir & DE_PLANEB_FLIP_DONE)
+       if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
+               intel_finish_page_flip(dev, 1);
+       }
 
-       if (de_iir & DE_PIPEA_VBLANK) {
+       if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);
-               intel_finish_page_flip(dev, 0);
-       }
 
-       if (de_iir & DE_PIPEB_VBLANK) {
+       if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);
-               intel_finish_page_flip(dev, 1);
-       }
 
        /* check event from PCH */
        if ((de_iir & DE_PCH_EVENT) &&
index 847006c..ab1bd2d 100644 (file)
 #define   FBC_CTL_PERIODIC     (1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE          (1<<13)
 #define   FBC_CTL_STRIDE_SHIFT (5)
 #define   FBC_CTL_FENCENO      (1<<0)
 #define FBC_COMMAND            0x0320c
index 12775df..b27202d 100644 (file)
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX         350000
 #define IRONLAKE_VCO_MIN         1760000
 #define IRONLAKE_VCO_MAX         3510000
-#define IRONLAKE_N_MIN           1
-#define IRONLAKE_N_MAX           6
-#define IRONLAKE_M_MIN           79
-#define IRONLAKE_M_MAX           127
 #define IRONLAKE_M1_MIN          12
 #define IRONLAKE_M1_MAX          22
 #define IRONLAKE_M2_MIN          5
 #define IRONLAKE_M2_MAX          9
-#define IRONLAKE_P_SDVO_DAC_MIN  5
-#define IRONLAKE_P_SDVO_DAC_MAX  80
-#define IRONLAKE_P_LVDS_MIN      28
-#define IRONLAKE_P_LVDS_MAX      112
-#define IRONLAKE_P1_MIN          1
-#define IRONLAKE_P1_MAX          8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN    10
-#define IRONLAKE_P_DISPLAY_PORT_MAX    20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST  10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW  10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN   1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX   2
+/* We have parameter ranges for different types of output. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN     1
+#define IRONLAKE_DAC_N_MAX     5
+#define IRONLAKE_DAC_M_MIN     79
+#define IRONLAKE_DAC_M_MAX     127
+#define IRONLAKE_DAC_P_MIN     5
+#define IRONLAKE_DAC_P_MAX     80
+#define IRONLAKE_DAC_P1_MIN    1
+#define IRONLAKE_DAC_P1_MAX    8
+#define IRONLAKE_DAC_P2_SLOW   10
+#define IRONLAKE_DAC_P2_FAST   5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN  1
+#define IRONLAKE_LVDS_S_N_MAX  3
+#define IRONLAKE_LVDS_S_M_MIN  79
+#define IRONLAKE_LVDS_S_M_MAX  118
+#define IRONLAKE_LVDS_S_P_MIN  28
+#define IRONLAKE_LVDS_S_P_MAX  112
+#define IRONLAKE_LVDS_S_P1_MIN 2
+#define IRONLAKE_LVDS_S_P1_MAX 8
+#define IRONLAKE_LVDS_S_P2_SLOW        14
+#define IRONLAKE_LVDS_S_P2_FAST        14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN  1
+#define IRONLAKE_LVDS_D_N_MAX  3
+#define IRONLAKE_LVDS_D_M_MIN  79
+#define IRONLAKE_LVDS_D_M_MAX  127
+#define IRONLAKE_LVDS_D_P_MIN  14
+#define IRONLAKE_LVDS_D_P_MAX  56
+#define IRONLAKE_LVDS_D_P1_MIN 2
+#define IRONLAKE_LVDS_D_P1_MAX 8
+#define IRONLAKE_LVDS_D_P2_SLOW        7
+#define IRONLAKE_LVDS_D_P2_FAST        7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN      1
+#define IRONLAKE_LVDS_S_SSC_N_MAX      2
+#define IRONLAKE_LVDS_S_SSC_M_MIN      79
+#define IRONLAKE_LVDS_S_SSC_M_MAX      126
+#define IRONLAKE_LVDS_S_SSC_P_MIN      28
+#define IRONLAKE_LVDS_S_SSC_P_MAX      112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN     2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX     8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW    14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST    14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN      1
+#define IRONLAKE_LVDS_D_SSC_N_MAX      3
+#define IRONLAKE_LVDS_D_SSC_M_MIN      79
+#define IRONLAKE_LVDS_D_SSC_M_MAX      126
+#define IRONLAKE_LVDS_D_SSC_P_MIN      14
+#define IRONLAKE_LVDS_D_SSC_P_MAX      42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN     2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX     6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW    7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST    7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN              1
+#define IRONLAKE_DP_N_MAX              2
+#define IRONLAKE_DP_M_MIN              81
+#define IRONLAKE_DP_M_MAX              90
+#define IRONLAKE_DP_P_MIN              10
+#define IRONLAKE_DP_P_MAX              20
+#define IRONLAKE_DP_P2_FAST            10
+#define IRONLAKE_DP_P2_SLOW            10
+#define IRONLAKE_DP_P2_LIMIT           0
+#define IRONLAKE_DP_P1_MIN             1
+#define IRONLAKE_DP_P1_MAX             2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
        .find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
        .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-       .n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-       .m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+       .n   = { .min = IRONLAKE_DAC_N_MIN,        .max = IRONLAKE_DAC_N_MAX },
+       .m   = { .min = IRONLAKE_DAC_M_MIN,        .max = IRONLAKE_DAC_M_MAX },
        .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
        .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-       .p   = { .min = IRONLAKE_P_SDVO_DAC_MIN,   .max = IRONLAKE_P_SDVO_DAC_MAX },
-       .p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+       .p   = { .min = IRONLAKE_DAC_P_MIN,        .max = IRONLAKE_DAC_P_MAX },
+       .p1  = { .min = IRONLAKE_DAC_P1_MIN,       .max = IRONLAKE_DAC_P1_MAX },
        .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-                .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-                .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+                .p2_slow = IRONLAKE_DAC_P2_SLOW,
+                .p2_fast = IRONLAKE_DAC_P2_FAST },
        .find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
        .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-       .n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-       .m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+       .n   = { .min = IRONLAKE_LVDS_S_N_MIN,     .max = IRONLAKE_LVDS_S_N_MAX },
+       .m   = { .min = IRONLAKE_LVDS_S_M_MIN,     .max = IRONLAKE_LVDS_S_M_MAX },
        .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
        .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-       .p   = { .min = IRONLAKE_P_LVDS_MIN,       .max = IRONLAKE_P_LVDS_MAX },
-       .p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+       .p   = { .min = IRONLAKE_LVDS_S_P_MIN,     .max = IRONLAKE_LVDS_S_P_MAX },
+       .p1  = { .min = IRONLAKE_LVDS_S_P1_MIN,    .max = IRONLAKE_LVDS_S_P1_MAX },
        .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-                .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-                .p2_fast = IRONLAKE_P2_LVDS_FAST },
+                .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+       .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+       .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+       .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+       .n   = { .min = IRONLAKE_LVDS_D_N_MIN,     .max = IRONLAKE_LVDS_D_N_MAX },
+       .m   = { .min = IRONLAKE_LVDS_D_M_MIN,     .max = IRONLAKE_LVDS_D_M_MAX },
+       .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+       .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+       .p   = { .min = IRONLAKE_LVDS_D_P_MIN,     .max = IRONLAKE_LVDS_D_P_MAX },
+       .p1  = { .min = IRONLAKE_LVDS_D_P1_MIN,    .max = IRONLAKE_LVDS_D_P1_MAX },
+       .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+       .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+       .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+       .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+       .n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+       .m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+       .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+       .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+       .p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+       .p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+       .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+       .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+       .dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+       .vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+       .n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+       .m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+       .m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+       .m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+       .p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+       .p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+       .p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
        .find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
                  .max = IRONLAKE_DOT_MAX },
         .vco = { .min = IRONLAKE_VCO_MIN,
                  .max = IRONLAKE_VCO_MAX},
-        .n   = { .min = IRONLAKE_N_MIN,
-                 .max = IRONLAKE_N_MAX },
-        .m   = { .min = IRONLAKE_M_MIN,
-                 .max = IRONLAKE_M_MAX },
+        .n   = { .min = IRONLAKE_DP_N_MIN,
+                 .max = IRONLAKE_DP_N_MAX },
+        .m   = { .min = IRONLAKE_DP_M_MIN,
+                 .max = IRONLAKE_DP_M_MAX },
         .m1  = { .min = IRONLAKE_M1_MIN,
                  .max = IRONLAKE_M1_MAX },
         .m2  = { .min = IRONLAKE_M2_MIN,
                  .max = IRONLAKE_M2_MAX },
-        .p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
-                 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
-        .p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
-                 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
-        .p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
-                 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
-                 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+        .p   = { .min = IRONLAKE_DP_P_MIN,
+                 .max = IRONLAKE_DP_P_MAX },
+        .p1  = { .min = IRONLAKE_DP_P1_MIN,
+                 .max = IRONLAKE_DP_P1_MAX},
+        .p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+                 .p2_slow = IRONLAKE_DP_P2_SLOW,
+                 .p2_fast = IRONLAKE_DP_P2_FAST },
         .find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-               limit = &intel_limits_ironlake_lvds;
-       else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+       int refclk = 120;
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+                       refclk = 100;
+
+               if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP) {
+                       /* LVDS dual channel */
+                       if (refclk == 100)
+                               limit = &intel_limits_ironlake_dual_lvds_100m;
+                       else
+                               limit = &intel_limits_ironlake_dual_lvds;
+               } else {
+                       if (refclk == 100)
+                               limit = &intel_limits_ironlake_single_lvds_100m;
+                       else
+                               limit = &intel_limits_ironlake_single_lvds;
+               }
+       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
                        HAS_eDP)
                limit = &intel_limits_ironlake_display_port;
        else
-               limit = &intel_limits_ironlake_sdvo;
+               limit = &intel_limits_ironlake_dac;
 
        return limit;
 }
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+       if (IS_I945GM(dev))
+               fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -3962,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 struct intel_unpin_work {
        struct work_struct work;
        struct drm_device *dev;
-       struct drm_gem_object *obj;
+       struct drm_gem_object *old_fb_obj;
+       struct drm_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
        int pending;
 };
@@ -3973,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                container_of(__work, struct intel_unpin_work, work);
 
        mutex_lock(&work->dev->struct_mutex);
-       i915_gem_object_unpin(work->obj);
-       drm_gem_object_unreference(work->obj);
+       i915_gem_object_unpin(work->old_fb_obj);
+       drm_gem_object_unreference(work->pending_flip_obj);
+       drm_gem_object_unreference(work->old_fb_obj);
        mutex_unlock(&work->dev->struct_mutex);
        kfree(work);
 }
@@ -3998,7 +4119,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
                if (work && !work->pending) {
-                       obj_priv = work->obj->driver_private;
+                       obj_priv = work->pending_flip_obj->driver_private;
                        DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
                                         obj_priv,
                                         atomic_read(&obj_priv->pending_flip));
@@ -4023,7 +4144,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       obj_priv = work->obj->driver_private;
+       obj_priv = work->pending_flip_obj->driver_private;
 
        /* Initial scanout buffer will have a 0 pending flip count */
        if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4060,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;
-       int ret;
+       int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int ret, pipesrc;
        RING_LOCALS;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4072,7 +4194,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->event = event;
        work->dev = crtc->dev;
        intel_fb = to_intel_framebuffer(crtc->fb);
-       work->obj = intel_fb->obj;
+       work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
        /* We borrow the event spin lock for protecting unpin_work */
@@ -4100,14 +4222,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                return ret;
        }
 
-       /* Reference the old fb object for the scheduled work. */
-       drm_gem_object_reference(work->obj);
+       /* Reference the objects for the scheduled work. */
+       drm_gem_object_reference(work->old_fb_obj);
+       drm_gem_object_reference(obj);
 
        crtc->fb = fb;
        i915_gem_object_flush_write_domain(obj);
        drm_vblank_get(dev, intel_crtc->pipe);
        obj_priv = obj->driver_private;
        atomic_inc(&obj_priv->pending_flip);
+       work->pending_flip_obj = obj;
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_DISPLAY_FLIP |
@@ -4115,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        OUT_RING(fb->pitch);
        if (IS_I965G(dev)) {
                OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-               OUT_RING((fb->width << 16) | fb->height);
+               pipesrc = I915_READ(pipesrc_reg);
+               OUT_RING(pipesrc & 0x0fff0fff);
        } else {
                OUT_RING(obj_priv->gtt_offset);
                OUT_RING(MI_NOOP);
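
The page-flip rework above comes down to one lifetime rule: the deferred unpin work must keep both GEM objects alive until the flip has actually completed, the old framebuffer object (which still has to be unpinned) and the newly queued object (whose pending_flip count the vblank handler inspects). A stand-alone sketch of that rule, in plain user-space C with made-up names and a simple counter standing in for GEM references:

#include <stdio.h>

/* Stand-in for a GEM buffer object: a name and a reference count. */
struct buf {
        const char *name;
        int refs;
};

static void buf_get(struct buf *b) { b->refs++; }

static void buf_put(struct buf *b)
{
        if (--b->refs == 0)
                printf("%s freed\n", b->name);
}

/* A queued flip holds a reference on both buffers until it completes. */
struct flip {
        struct buf *old_fb;     /* still pinned as the current scanout */
        struct buf *pending;    /* becomes the scanout when the flip lands */
};

static struct flip queue_flip(struct buf *old_fb, struct buf *pending)
{
        struct flip f = { old_fb, pending };

        buf_get(old_fb);        /* must survive until it can be unpinned */
        buf_get(pending);       /* must survive even if dropped early */
        return f;
}

static void complete_flip(struct flip *f)
{
        /* the driver would unpin f->old_fb at this point */
        buf_put(f->old_fb);
        buf_put(f->pending);
}

int main(void)
{
        struct buf a = { "old fb", 1 }, b = { "new fb", 1 };
        struct flip f = queue_flip(&a, &b);

        buf_put(&b);            /* userspace drops its handle before vblank */
        complete_flip(&f);      /* ...yet "new fb" stays valid until here */
        buf_put(&a);
        return 0;
}
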
index 371d753..aaabbcb 100644 (file)
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+       ret = i915_gem_object_pin(fbo, 64*1024);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
index b1d0acb..c2e8a45 100644 (file)
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
                },
        },
+       {
+               .ident = "Clevo M5x0N",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+                       DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+               },
+       },
        { }
 };
 
index 1cf4882..48227e7 100644 (file)
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
 {
        int result;
 
-       if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+       if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
                                                                &result))
                return -ENODEV;
 
        NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
 
-       if (result & 0x1) {     /* Stamina mode - disable the external GPU */
+       if (result) { /* Ensure that the external GPU is enabled */
+               nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+               nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+                                                                       NULL);
+       } else { /* Stamina mode - disable the external GPU */
                nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
                                                                        NULL);
                nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
                                                                        NULL);
-       } else {                /* Ensure that the external GPU is enabled */
-               nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-               nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
-                                                                       NULL);
        }
 
        return 0;
index d7f8d8b..0e9cd1d 100644 (file)
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
 
-       if (dev_priv->card_type >= NV_50)
+       if (dev_priv->card_type >= NV_40)
                return 1;
 
        /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         */
 
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct init_exec iexec = {true, false};
        struct nvbios *bios = &dev_priv->VBIOS;
        uint8_t *table = &bios->data[bios->display.script_table_ptr];
        uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
        }
 
-       bios->display.output = dcbent;
-
        if (pxclk == 0) {
                script = ROM16(otable[6]);
                if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
 
                NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
-               parse_init_table(bios, script, &iexec);
+               nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -1) {
                script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
 
                NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
-               parse_init_table(bios, script, &iexec);
+               nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -2) {
                if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
 
                NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
-               parse_init_table(bios, script, &iexec);
+               nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk > 0) {
                script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
 
                NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
-               parse_init_table(bios, script, &iexec);
+               nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk < 0) {
                script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                }
 
                NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
-               parse_init_table(bios, script, &iexec);
+               nouveau_bios_run_init_table(dev, script, dcbent);
        }
 
        return 0;
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
        struct nvbios *bios = &dev_priv->VBIOS;
        struct init_exec iexec = { true, false };
 
+       mutex_lock(&bios->lock);
        bios->display.output = dcbent;
        parse_init_table(bios, table, &iexec);
        bios->display.output = NULL;
+       mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
        struct nvbios *bios = &dev_priv->VBIOS;
 
        memset(bios, 0, sizeof(struct nvbios));
+       mutex_init(&bios->lock);
        bios->dev = dev;
 
        if (!NVShadowVBIOS(dev, bios->data))
index 058e98c..fd94bd6 100644 (file)
@@ -205,6 +205,8 @@ struct nvbios {
        struct drm_device *dev;
        struct nouveau_bios_info pub;
 
+       struct mutex lock;
+
        uint8_t data[NV_PROM_SIZE];
        unsigned int length;
        bool execute;
index db0ed4c..028719f 100644 (file)
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
 
        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
-        * align to to that as well as the page size. Overallocate memory to
-        * avoid corruption of other buffer objects.
+        * align to that as well as the page size. Align the size to the
+        * appropriate boundaries. This does imply that sizes are rounded up by
+        * 3-7 pages, so be aware of this and do not waste memory by allocating
+        * many small buffers.
         */
        if (dev_priv->card_type == NV_50) {
                uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
                case 0x2800:
                case 0x4800:
                case 0x7a00:
-                       *size = roundup(*size, block_size);
                        if (is_power_of_2(block_size)) {
-                               *size += 3 * block_size;
                                for (i = 1; i < 10; i++) {
                                        *align = 12 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        } else {
-                               *size += 6 * block_size;
                                for (i = 1; i < 10; i++) {
                                        *align = 8 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        }
+                       *size = roundup(*size, *align);
                        break;
                default:
                        break;
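
The comment above gives the new sizing rule; the arithmetic is easy to check in isolation. A small stand-alone program (the 512 MiB VRAM figure is made up for the example, and roundup() is re-implemented locally) that derives the alignment the same way and then rounds a request up to it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t n) { return n && !(n & (n - 1)); }

/* round x up to the next multiple of y (y > 0) */
static uint32_t roundup32(uint32_t x, uint32_t y) { return ((x + y - 1) / y) * y; }

int main(void)
{
        uint32_t block_size = (512u << 20) >> 15;       /* 512 MiB card: 16384 */
        uint32_t size = 300 * 1024;                     /* requested object size */
        uint32_t align = 0;
        int i;

        for (i = 1; i < 10; i++) {
                align = (is_power_of_2(block_size) ? 12 : 8) * i * block_size;
                if (!(align % 65536))
                        break;
        }
        size = roundup32(size, align);

        printf("align = %u, size rounded up to %u\n", align, size);
        return 0;
}
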
index 343d718..2281f99 100644 (file)
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
        /* Ensure the channel is no longer active on the GPU */
        pfifo->reassign(dev, false);
 
-       if (pgraph->channel(dev) == chan) {
-               pgraph->fifo_access(dev, false);
+       pgraph->fifo_access(dev, false);
+       if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);
-               pgraph->fifo_access(dev, true);
-       }
        pgraph->destroy_context(chan);
+       pgraph->fifo_access(dev, true);
 
        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
index 7e6d673..d2f6335 100644 (file)
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
 {
        struct nouveau_connector *nv_connector =
                nouveau_connector(drm_connector);
-       struct drm_device *dev = nv_connector->base.dev;
-
-       NV_DEBUG_KMS(dev, "\n");
+       struct drm_device *dev;
 
        if (!nv_connector)
                return;
 
+       dev = nv_connector->base.dev;
+       NV_DEBUG_KMS(dev, "\n");
+
        kfree(nv_connector->edid);
        drm_sysfs_connector_remove(drm_connector);
        drm_connector_cleanup(drm_connector);
index dd49372..f954ad9 100644 (file)
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                        break;
        }
 
-       if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
-               ret = -EREMOTEIO;
-               goto out;
-       }
-
        if (cmd & 1) {
+               if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+                       ret = -EREMOTEIO;
+                       goto out;
+               }
+
                for (i = 0; i < 4; i++) {
                        data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
                        NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
index 343ab7f..da3b93b 100644 (file)
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
 module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 
 MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
 module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
 
 MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
 int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
                 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
                 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
index 6b96904..1c15ef3 100644 (file)
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
        uint64_t vm_end;
        struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
        int vm_vram_pt_nr;
+       uint64_t vram_sys_base;
 
        /* the mtrr covering the FB */
        int fb_mtrr;
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
 extern int nouveau_ctxfw;
 extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
 
 /* nouveau_state.c */
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
index 0b05c86..ea879a2 100644 (file)
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
        .fb_setcmap = drm_fb_helper_setcmap,
 };
 
+static struct fb_ops nv04_fbcon_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_setcolreg = drm_fb_helper_setcolreg,
+       .fb_fillrect = nv04_fbcon_fillrect,
+       .fb_copyarea = nv04_fbcon_copyarea,
+       .fb_imageblit = nv04_fbcon_imageblit,
+       .fb_sync = nouveau_fbcon_sync,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_setcolreg = drm_fb_helper_setcolreg,
+       .fb_fillrect = nv50_fbcon_fillrect,
+       .fb_copyarea = nv50_fbcon_copyarea,
+       .fb_imageblit = nv50_fbcon_imageblit,
+       .fb_sync = nouveau_fbcon_sync,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+};
+
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno)
 {
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
        dev_priv->fbdev_info = info;
 
        strcpy(info->fix.id, "nouveaufb");
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
-                     FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+       if (nouveau_nofbaccel)
+               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+       else
+               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+                             FBINFO_HWACCEL_FILLRECT |
+                             FBINFO_HWACCEL_IMAGEBLIT;
        info->fbops = &nouveau_fbcon_ops;
        info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
                               dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
        par->nouveau_fb = nouveau_fb;
        par->dev = dev;
 
-       if (dev_priv->channel) {
+       if (dev_priv->channel && !nouveau_nofbaccel) {
                switch (dev_priv->card_type) {
                case NV_50:
                        nv50_fbcon_accel_init(info);
+                       info->fbops = &nv50_fbcon_ops;
                        break;
                default:
                        nv04_fbcon_accel_init(info);
+                       info->fbops = &nv04_fbcon_ops;
                        break;
                };
        }
index 462e0b8..f9c34e1 100644 (file)
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 void nouveau_fbcon_restore(void);
 void nouveau_fbcon_zfill(struct drm_device *dev);
 
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
index 6ac804b..70cc308 100644 (file)
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
        }
 
        if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+               spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+               spin_unlock(&nvbo->bo.lock);
        } else {
                ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
                if (ret == 0)
index 419f4c2..c7ebec6 100644 (file)
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
                }
 
                pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
-               if (!pgraph->ctxprog) {
-                       NV_ERROR(dev, "OOM copying ctxprog\n");
+               if (!pgraph->ctxvals) {
+                       NV_ERROR(dev, "OOM copying ctxvals\n");
                        release_firmware(fw);
                        nouveau_grctx_fini(dev);
                        return -ENOMEM;
index 3b9bad6..447f9f6 100644 (file)
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
                                                                get + 4);
                }
 
+               if (status & NV_PFIFO_INTR_SEMAPHORE) {
+                       uint32_t sem;
+
+                       status &= ~NV_PFIFO_INTR_SEMAPHORE;
+                       nv_wr32(dev, NV03_PFIFO_INTR_0,
+                               NV_PFIFO_INTR_SEMAPHORE);
+
+                       sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+                       nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+                       nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+                       nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+               }
+
                if (status) {
                        NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
                                status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
 static void
 nv50_pgraph_irq_handler(struct drm_device *dev)
 {
-       uint32_t status, nsource;
+       uint32_t status;
 
-       status = nv_rd32(dev, NV03_PGRAPH_INTR);
-       nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+       while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+               uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
 
-       if (status & 0x00000001) {
-               nouveau_pgraph_intr_notify(dev, nsource);
-               status &= ~0x00000001;
-               nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-       }
+               if (status & 0x00000001) {
+                       nouveau_pgraph_intr_notify(dev, nsource);
+                       status &= ~0x00000001;
+                       nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+               }
 
-       if (status & 0x00000010) {
-               nouveau_pgraph_intr_error(dev, nsource |
-                                         NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+               if (status & 0x00000010) {
+                       nouveau_pgraph_intr_error(dev, nsource |
+                                       NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
 
-               status &= ~0x00000010;
-               nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-       }
+                       status &= ~0x00000010;
+                       nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+               }
 
-       if (status & 0x00001000) {
-               nv_wr32(dev, 0x400500, 0x00000000);
-               nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-               nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-                       NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-               nv_wr32(dev, 0x400500, 0x00010001);
+               if (status & 0x00001000) {
+                       nv_wr32(dev, 0x400500, 0x00000000);
+                       nv_wr32(dev, NV03_PGRAPH_INTR,
+                               NV_PGRAPH_INTR_CONTEXT_SWITCH);
+                       nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+                               NV40_PGRAPH_INTR_EN) &
+                               ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+                       nv_wr32(dev, 0x400500, 0x00010001);
 
-               nv50_graph_context_switch(dev);
+                       nv50_graph_context_switch(dev);
 
-               status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-       }
+                       status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+               }
 
-       if (status & 0x00100000) {
-               nouveau_pgraph_intr_error(dev, nsource |
-                                         NV03_PGRAPH_NSOURCE_DATA_ERROR);
+               if (status & 0x00100000) {
+                       nouveau_pgraph_intr_error(dev, nsource |
+                                       NV03_PGRAPH_NSOURCE_DATA_ERROR);
 
-               status &= ~0x00100000;
-               nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-       }
+                       status &= ~0x00100000;
+                       nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+               }
 
-       if (status & 0x00200000) {
-               int r;
-
-               nouveau_pgraph_intr_error(dev, nsource |
-                                         NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
-               NV_ERROR(dev, "magic set 1:\n");
-               for (r = 0x408900; r <= 0x408910; r += 4)
-                       NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-               nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
-               for (r = 0x408e08; r <= 0x408e24; r += 4)
-                       NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-               nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
-               NV_ERROR(dev, "magic set 2:\n");
-               for (r = 0x409900; r <= 0x409910; r += 4)
-                       NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-               nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
-               for (r = 0x409e08; r <= 0x409e24; r += 4)
-                       NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-               nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
-               status &= ~0x00200000;
-               nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
-               nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-       }
+               if (status & 0x00200000) {
+                       int r;
+
+                       nouveau_pgraph_intr_error(dev, nsource |
+                                       NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+                       NV_ERROR(dev, "magic set 1:\n");
+                       for (r = 0x408900; r <= 0x408910; r += 4)
+                               NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                                       nv_rd32(dev, r));
+                       nv_wr32(dev, 0x408900,
+                               nv_rd32(dev, 0x408904) | 0xc0000000);
+                       for (r = 0x408e08; r <= 0x408e24; r += 4)
+                               NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                                                       nv_rd32(dev, r));
+                       nv_wr32(dev, 0x408e08,
+                               nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+                       NV_ERROR(dev, "magic set 2:\n");
+                       for (r = 0x409900; r <= 0x409910; r += 4)
+                               NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                                       nv_rd32(dev, r));
+                       nv_wr32(dev, 0x409900,
+                               nv_rd32(dev, 0x409904) | 0xc0000000);
+                       for (r = 0x409e08; r <= 0x409e24; r += 4)
+                               NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                                       nv_rd32(dev, r));
+                       nv_wr32(dev, 0x409e08,
+                               nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+                       status &= ~0x00200000;
+                       nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+                       nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+               }
 
-       if (status) {
-               NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
-               nv_wr32(dev, NV03_PGRAPH_INTR, status);
-       }
+               if (status) {
+                       NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+                               status);
+                       nv_wr32(dev, NV03_PGRAPH_INTR, status);
+               }
 
-       {
-               const int isb = (1 << 16) | (1 << 0);
+               {
+                       const int isb = (1 << 16) | (1 << 0);
 
-               if ((nv_rd32(dev, 0x400500) & isb) != isb)
-                       nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
-               nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+                       if ((nv_rd32(dev, 0x400500) & isb) != isb)
+                               nv_wr32(dev, 0x400500,
+                                       nv_rd32(dev, 0x400500) | isb);
+               }
        }
 
        nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+       nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }
 
 static void
index 8f3a12f..2dc09db 100644 (file)
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
                        uint32_t flags, uint64_t phys)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj **pgt;
-       unsigned psz, pfl, pages;
-
-       if (virt >= dev_priv->vm_gart_base &&
-           (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-               psz = 12;
-               pgt = &dev_priv->gart_info.sg_ctxdma;
-               pfl = 0x21;
-               virt -= dev_priv->vm_gart_base;
-       } else
-       if (virt >= dev_priv->vm_vram_base &&
-           (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-               psz = 16;
-               pgt = dev_priv->vm_vram_pt;
-               pfl = 0x01;
-               virt -= dev_priv->vm_vram_base;
-       } else {
-               NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-                        virt, virt + size - 1);
-               return -EINVAL;
-       }
+       struct nouveau_gpuobj *pgt;
+       unsigned block;
+       int i;
 
-       pages = size >> psz;
+       virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+       size = (size >> 16) << 1;
+
+       phys |= ((uint64_t)flags << 32);
+       phys |= 1;
+       if (dev_priv->vram_sys_base) {
+               phys += dev_priv->vram_sys_base;
+               phys |= 0x30;
+       }
 
        dev_priv->engine.instmem.prepare_access(dev, true);
-       if (flags & 0x80000000) {
-               while (pages--) {
-                       struct nouveau_gpuobj *pt = pgt[virt >> 29];
-                       unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+       while (size) {
+               unsigned offset_h = upper_32_bits(phys);
+               unsigned offset_l = lower_32_bits(phys);
+               unsigned pte, end;
+
+               for (i = 7; i >= 0; i--) {
+                       block = 1 << (i + 1);
+                       if (size >= block && !(virt & (block - 1)))
+                               break;
+               }
+               offset_l |= (i << 7);
 
-                       nv_wo32(dev, pt, pte++, 0x00000000);
-                       nv_wo32(dev, pt, pte++, 0x00000000);
+               phys += block << 15;
+               size -= block;
 
-                       virt += (1 << psz);
-               }
-       } else {
-               while (pages--) {
-                       struct nouveau_gpuobj *pt = pgt[virt >> 29];
-                       unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-                       unsigned offset_h = upper_32_bits(phys) & 0xff;
-                       unsigned offset_l = lower_32_bits(phys);
+               while (block) {
+                       pgt = dev_priv->vm_vram_pt[virt >> 14];
+                       pte = virt & 0x3ffe;
 
-                       nv_wo32(dev, pt, pte++, offset_l | pfl);
-                       nv_wo32(dev, pt, pte++, offset_h | flags);
+                       end = pte + block;
+                       if (end > 16384)
+                               end = 16384;
+                       block -= (end - pte);
+                       virt  += (end - pte);
 
-                       phys += (1 << psz);
-                       virt += (1 << psz);
+                       while (pte < end) {
+                               nv_wo32(dev, pgt, pte++, offset_l);
+                               nv_wo32(dev, pgt, pte++, offset_h);
+                       }
                }
        }
        dev_priv->engine.instmem.finish_access(dev);
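
The rewritten bind loop emits the mapping in power-of-two runs, with the run-size index i folded into bits 7 and up of the low PTE word; the selection step simply picks the largest run that both fits the remaining size and is naturally aligned at the current virtual offset. The sketch below keeps only that selection step, replaces the driver's half-PTE units with plain page counts, uses made-up numbers, and drops the PTE writes and the vram_sys_base handling:

#include <stdio.h>

int main(void)
{
        /* map 300 pages of 64 KiB starting at virtual page 4 */
        unsigned int virt = 4, size = 300;

        while (size) {
                unsigned int block;
                int i;

                /* largest power-of-two run that fits and is naturally aligned */
                for (i = 7; i >= 0; i--) {
                        block = 1u << (i + 1);
                        if (size >= block && !(virt & (block - 1)))
                                break;
                }
                printf("map %3u pages at page %3u\n", block, virt);
                virt += block;
                size -= block;
        }
        return 0;
}
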
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-       nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *pgt;
+       unsigned pages, pte, end;
+
+       virt -= dev_priv->vm_vram_base;
+       pages = (size >> 16) << 1;
+
+       dev_priv->engine.instmem.prepare_access(dev, true);
+       while (pages) {
+               pgt = dev_priv->vm_vram_pt[virt >> 29];
+               pte = (virt & 0x1ffe0000ULL) >> 15;
+
+               end = pte + pages;
+               if (end > 16384)
+                       end = 16384;
+               pages -= (end - pte);
+               virt  += (end - pte) << 15;
+
+               while (pte < end)
+                       nv_wo32(dev, pgt, pte++, 0);
+       }
+       dev_priv->engine.instmem.finish_access(dev);
+
+       nv_wr32(dev, 0x100c80, 0x00050001);
+       if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+               NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+               NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+               return;
+       }
+
+       nv_wr32(dev, 0x100c80, 0x00000001);
+       if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+               NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+               NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+       }
 }
 
 /*
index 6c66a34..d99dc08 100644 (file)
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *ntfy = NULL;
+       uint32_t flags;
        int ret;
 
-       ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
-                             TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+       if (nouveau_vram_notify)
+               flags = TTM_PL_FLAG_VRAM;
+       else
+               flags = TTM_PL_FLAG_TT;
+
+       ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
                              0, 0x0000, false, true, &ntfy);
        if (ret)
                return ret;
 
-       ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+       ret = nouveau_bo_pin(ntfy, flags);
        if (ret)
                goto out_err;
 
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                        target = NV_DMA_TARGET_PCI;
                } else {
                        target = NV_DMA_TARGET_AGP;
+                       if (dev_priv->card_type >= NV_50)
+                               offset += dev_priv->vm_gart_base;
                }
        } else {
                NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
index 6c2cf81..e7c100b 100644 (file)
@@ -885,11 +885,12 @@ int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj_ret)
 {
-       struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+       struct drm_nouveau_private *dev_priv;
        struct nouveau_gpuobj *gpuobj;
 
        if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
                return -EINVAL;
+       dev_priv = chan->dev->dev_private;
 
        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
index 251f1b3..aa9b310 100644 (file)
@@ -99,6 +99,7 @@
  * the card will hang early on in the X init process.
  */
 #    define NV_PMC_ENABLE_UNK13                               (1<<13)
+#define NV40_PMC_GRAPH_UNITS                              0x00001540
 #define NV40_PMC_BACKLIGHT                                0x000015f0
 #      define NV40_PMC_BACKLIGHT_MASK                     0x001f0000
 #define NV40_PMC_1700                                      0x00001700
index 4c7f1e4..ed15905 100644 (file)
@@ -54,11 +54,12 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct drm_device *dev = nvbe->dev;
-
-       NV_DEBUG(nvbe->dev, "\n");
+       struct drm_device *dev;
 
        if (nvbe && nvbe->pages) {
+               dev = nvbe->dev;
+               NV_DEBUG(dev, "\n");
+
                if (nvbe->bound)
                        be->func->unbind(be);
 
index f2d0187..a4851af 100644 (file)
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
+       struct drm_device *dev = priv;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->chipset >= 0x40)
+               nv_wr32(dev, 0x88054, state);
+       else
+               nv_wr32(dev, 0x1854, state);
+
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_timer;
 
-       /* PGRAPH */
-       ret = engine->graph.init(dev);
-       if (ret)
-               goto out_fb;
+       if (nouveau_noaccel)
+               engine->graph.accel_blocked = true;
+       else {
+               /* PGRAPH */
+               ret = engine->graph.init(dev);
+               if (ret)
+                       goto out_fb;
 
-       /* PFIFO */
-       ret = engine->fifo.init(dev);
-       if (ret)
-               goto out_graph;
+               /* PFIFO */
+               ret = engine->fifo.init(dev);
+               if (ret)
+                       goto out_graph;
+       }
 
        /* this call irq_preinstall, register irq handler and
         * call irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
 out_irq:
        drm_irq_uninstall(dev);
 out_fifo:
-       engine->fifo.takedown(dev);
+       if (!nouveau_noaccel)
+               engine->fifo.takedown(dev);
 out_graph:
-       engine->graph.takedown(dev);
+       if (!nouveau_noaccel)
+               engine->graph.takedown(dev);
 out_fb:
        engine->fb.takedown(dev);
 out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
                        dev_priv->channel = NULL;
                }
 
-               engine->fifo.takedown(dev);
-               engine->graph.takedown(dev);
+               if (!nouveau_noaccel) {
+                       engine->fifo.takedown(dev);
+                       engine->graph.takedown(dev);
+               }
                engine->fb.takedown(dev);
                engine->timer.takedown(dev);
                engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
        case NOUVEAU_GETPARAM_VM_VRAM_BASE:
                getparam->value = dev_priv->vm_vram_base;
                break;
+       case NOUVEAU_GETPARAM_GRAPH_UNITS:
+               /* NV40 and NV50 versions are quite different, but register
+                * address is the same. User is supposed to know the card
+                * family anyway... */
+               if (dev_priv->chipset >= 0x40) {
+                       getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+                       break;
+               }
+               /* FALLTHRU */
        default:
                NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
                return -EINVAL;
index d0e038d..1d73b15 100644 (file)
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
                                                 struct drm_connector *connector)
 {
        struct drm_device *dev = encoder->dev;
-       uint8_t saved_seq1, saved_pi, saved_rpc1;
+       uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
        uint8_t saved_palette0[3], saved_palette_mask;
        uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
        int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
                /* only implemented for head A for now */
                NVSetOwner(dev, 0);
 
+       saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+       NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
        saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
        NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
        NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
        NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
        NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+       NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
        if (blue == 0x18) {
                NV_INFO(dev, "Load detected on head A\n");
index d910873..fd01caa 100644 (file)
@@ -27,7 +27,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
-static void
+void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
        FIRE_RING(chan);
 }
 
-static void
+void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
        FIRE_RING(chan);
 }
 
-static void
+void
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
 
        FIRE_RING(chan);
 
-       info->fbops->fb_fillrect = nv04_fbcon_fillrect;
-       info->fbops->fb_copyarea = nv04_fbcon_copyarea;
-       info->fbops->fb_imageblit = nv04_fbcon_imageblit;
        return 0;
 }
 
index 58b917c..21ac6e4 100644 (file)
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
                                nouveau_encoder(encoder)->restore.output);
 
        nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+       nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
index 40b7360..d1a651e 100644 (file)
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       NV_DEBUG_KMS(dev, "\n");
+       struct drm_device *dev;
+       struct nouveau_crtc *nv_crtc;
 
        if (!crtc)
                return;
 
+       dev = crtc->dev;
+       nv_crtc = nouveau_crtc(crtc);
+
+       NV_DEBUG_KMS(dev, "\n");
+
        drm_crtc_cleanup(&nv_crtc->base);
 
        nv50_cursor_fini(nv_crtc);
index e4f279e..0f57cdf 100644 (file)
@@ -3,7 +3,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
-static void
+void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
        FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
        FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
        struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
                         dev_priv->vm_vram_base);
 
-       info->fbops->fb_fillrect = nv50_fbcon_fillrect;
-       info->fbops->fb_copyarea = nv50_fbcon_copyarea;
-       info->fbops->fb_imageblit = nv50_fbcon_imageblit;
        return 0;
 }
 
index 32b244b..204a79f 100644 (file)
@@ -317,17 +317,20 @@ void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
+       struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       nouveau_gpuobj_ref_del(dev, &chan->ramfc);
-       nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+       /* This will ensure the channel is seen as disabled. */
+       chan->ramfc = NULL;
        nv50_fifo_channel_disable(dev, chan->id, false);
 
        /* Dummy channel, also used on ch 127 */
        if (chan->id == 0)
                nv50_fifo_channel_disable(dev, 127, false);
+
+       nouveau_gpuobj_ref_del(dev, &ramfc);
+       nouveau_gpuobj_ref_del(dev, &chan->cache);
 }
 
 int
index 20319e5..6d50480 100644 (file)
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
        uint32_t inst;
        int i;
 
+       /* Be sure we're not in the middle of a context switch or bad things
+        * will happen, such as unloading the wrong pgraph context.
+        */
+       if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+               NV_ERROR(dev, "Ctxprog is still running\n");
+
        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
                return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
 int
 nv50_graph_unload_context(struct drm_device *dev)
 {
-       uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+       uint32_t inst;
 
        inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
        inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
 
        nouveau_wait_for_idle(dev);
-       nv_wr32(dev, 0x400500, fifo & ~1);
        nv_wr32(dev, 0x400784, inst);
        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
        nouveau_wait_for_idle(dev);
-       nv_wr32(dev, 0x400500, fifo);
 
        nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
        return 0;
index 94400f7..f0dc4e3 100644 (file)
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
        for (i = 0x1700; i <= 0x1710; i += 4)
                priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+       if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+               dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+       else
+               dev_priv->vram_sys_base = 0;
+
        /* Reserve the last MiB of VRAM, we should probably try to avoid
         * setting up the below tables over the top of the VBIOS image at
         * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
         * We map the entire fake channel into the start of the PRAMIN BAR
         */
        ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
-                                                       0, &priv->pramin_pt);
+                                    0, &priv->pramin_pt);
        if (ret)
                return ret;
 
-       for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-               if (v < (c_offset + c_size))
-                       BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-               else
-                       BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+       v = c_offset | 1;
+       if (dev_priv->vram_sys_base) {
+               v += dev_priv->vram_sys_base;
+               v |= 0x30;
+       }
+
+       i = 0;
+       while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+               BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+               BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+               v += 0x1000;
+               i += 8;
+       }
+
+       while (i < pt_size) {
+               BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
                BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+               i += 8;
        }
 
        BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-       uint32_t pte, pte_end, vram;
+       struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+       uint32_t pte, pte_end;
+       uint64_t vram;
 
        if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
                return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
        NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
                 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-       pte     = (gpuobj->im_pramin->start >> 12) << 3;
-       pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+       pte     = (gpuobj->im_pramin->start >> 12) << 1;
+       pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
        vram    = gpuobj->im_backing_start;
 
        NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
                 gpuobj->im_pramin->start, pte, pte_end);
        NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+       vram |= 1;
+       if (dev_priv->vram_sys_base) {
+               vram += dev_priv->vram_sys_base;
+               vram |= 0x30;
+       }
+
        dev_priv->engine.instmem.prepare_access(dev, true);
        while (pte < pte_end) {
-               nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-               nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-               pte += 8;
+               nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+               nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
                vram += NV50_INSTMEM_PAGE_SIZE;
        }
        dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
        if (gpuobj->im_bound == 0)
                return -EINVAL;
 
-       pte     = (gpuobj->im_pramin->start >> 12) << 3;
-       pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+       pte     = (gpuobj->im_pramin->start >> 12) << 1;
+       pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
        dev_priv->engine.instmem.prepare_access(dev, true);
        while (pte < pte_end) {
-               nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-               nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-               pte += 8;
+               nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+               nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
        }
        dev_priv->engine.instmem.finish_access(dev);
 
index ecf1936..c2fff54 100644 (file)
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
                struct nouveau_encoder *nvenc = nouveau_encoder(enc);
 
                if (nvenc == nv_encoder ||
+                   nvenc->disconnect != nv50_sor_disconnect ||
                    nvenc->dcb->or != nv_encoder->dcb->or)
                        continue;
 
index 5982321..1c02d23 100644 (file)
@@ -1,10 +1,14 @@
 config DRM_RADEON_KMS
-       bool "Enable modesetting on radeon by default"
+       bool "Enable modesetting on radeon by default - NEW DRIVER"
        depends on DRM_RADEON
        help
-         Choose this option if you want kernel modesetting enabled by default,
-         and you have a new enough userspace to support this. Running old
-         userspaces with this enabled will cause pain.
+         Choose this option if you want kernel modesetting enabled by default.
+
+         This is a completely new driver. It's only part of the existing drm
+         for compatibility reasons. It requires an entirely different graphics
+         stack above it and works very differently from the old drm stack.
+         i.e. don't enable this unless you know what you are doing; it may
+         cause issues or bugs compared to the previous userspace driver stack.
 
          When kernel modesetting is enabled the IOCTL of radeon/drm
          driver are considered as invalid and an error message is printed
index e3b4456..7f152f6 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <asm/unaligned.h>
 
 #define ATOM_DEBUG
 
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
        case ATOM_ARG_PS:
                idx = U8(*ptr);
                (*ptr)++;
-               val = le32_to_cpu(ctx->ps[idx]);
+               /* get_unaligned_le32 avoids unaligned accesses from atombios
+                * tables, noticed on a DEC Alpha. */
+               val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
                if (print)
                        DEBUG("PS[0x%02X,0x%04X]", idx, val);
                break;
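
get_unaligned_le32() is the kernel helper for exactly this; outside the kernel the usual portable equivalent is a byte-wise copy followed by explicit little-endian assembly, which never dereferences a misaligned 32-bit pointer. A stand-alone sketch (not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read a little-endian u32 from a possibly unaligned address */
static uint32_t read_le32(const void *p)
{
        uint8_t b[4];

        memcpy(b, p, 4);        /* byte copy: no alignment requirement */
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        uint8_t table[] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

        /* table + 1 is misaligned; *(uint32_t *)(table + 1) can trap or be
         * fixed up very slowly on strict-alignment machines such as Alpha */
        printf("0x%08x\n", (unsigned int)read_le32(table + 1)); /* 0x12345678 */
        return 0;
}
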
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
        uint8_t count = U8((*ptr)++);
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
-               schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+               udelay(count);
        else
                schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
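
A note on the microsecond branch: usecs_to_jiffies() cannot express anything shorter than one jiffy, so it rounds the count up to at least 1; with HZ=250 a jiffy is 4 ms, turning a nominal 10 us ATOM delay into a sleep of up to roughly 4000 us. udelay() busy-waits for the requested count instead, which for a few microseconds is also cheaper than a scheduler round trip; the millisecond branch keeps the scheduled sleep, where jiffy granularity is adequate.
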
index 7106011..99915a6 100644 (file)
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
        PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
        int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
        unsigned char *base;
+       int retry_count = 0;
 
        memset(&args, 0, sizeof(args));
 
        base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
+retry:
        memcpy(base, req_bytes, num_bytes);
 
        args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
-       if (args.ucReplyStatus) {
-               DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+       if (args.ucReplyStatus && !args.ucDataOutLen) {
+               if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+                       goto retry;
+               DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
                          req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-                         chan->rec.i2c_id, args.ucReplyStatus);
+                         chan->rec.i2c_id, args.ucReplyStatus, retry_count);
                return false;
        }
 
index a1198d9..2ffcf5a 100644 (file)
@@ -1950,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
+
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio resume failed\n");
+               return r;
+       }
+
        return r;
 }
 
@@ -1957,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
        int r;
 
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
index b1c1d34..0dcb690 100644 (file)
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
        if (!r600_audio_chipset_supported(rdev))
                return;
 
-       WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
        del_timer(&rdev->audio_timer);
+       WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
 }
index af1c3ca..446b765 100644 (file)
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-       mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
index 6d5a711..75bcf35 100644 (file)
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
 
        gb_tiling_config |= R600_BANK_SWAPS(1);
 
-       backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
-                                                       dev_priv->r600_max_backends,
-                                                       (0xff << dev_priv->r600_max_backends) & 0xff);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+               backend_map = 0x28;
+       else
+               backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+                                                               dev_priv->r600_max_backends,
+                                                               (0xff << dev_priv->r600_max_backends) & 0xff);
        gb_tiling_config |= R600_BACKEND_MAP(backend_map);
 
        cc_gc_shader_pipe_config =
index f57480b..c0356bb 100644 (file)
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT                100000  /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE            16
 #define RADEON_DEBUGFS_MAX_NUM_FILES   32
 #define RADEONFB_CONN_LIMIT            4
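
The new comment is load-bearing: the reworked IB allocator further below wraps pool indices with a bitwise AND of (RADEON_IB_POOL_SIZE - 1), which only behaves like a modulo when the size is a power of two. A trivial self-check in plain C, nothing radeon-specific:

#include <assert.h>

int main(void)
{
        unsigned i;

        /* masking with (size - 1) equals modulo, but only for power-of-two sizes */
        for (i = 0; i < 1000; i++)
                assert((i & (16 - 1)) == (i % 16));
        return 0;
}
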
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
        struct list_head        list;
-       unsigned long           idx;
+       unsigned                idx;
        uint64_t                gpu_addr;
        struct radeon_fence     *fence;
-       uint32_t        *ptr;
+       uint32_t                *ptr;
        uint32_t                length_dw;
+       bool                    free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
        struct mutex            mutex;
        struct radeon_bo        *robj;
-       struct list_head        scheduled_ibs;
        struct radeon_ib        ibs[RADEON_IB_POOL_SIZE];
        bool                    ready;
-       DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+       unsigned                head_id;
 };
 
 struct radeon_cp {
index fa82ca7..4d88315 100644 (file)
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* Asrock RS600 board lists the DVI port as HDMI */
+       if ((dev->pdev->device == 0x7941) &&
+           (dev->pdev->subsystem_vendor == 0x1849) &&
+           (dev->pdev->subsystem_device == 0x7941)) {
+               if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       *connector_type = DRM_MODE_CONNECTOR_DVID;
+       }
+
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* XFX Pine Group device rv730 reports no VGA DDC lines
+        * even though they are wired up to record 0x93
+        */
+       if ((dev->pdev->device == 0x9498) &&
+           (dev->pdev->subsystem_vendor == 0x1682) &&
+           (dev->pdev->subsystem_device == 0x2452)) {
+               struct radeon_device *rdev = dev->dev_private;
+               *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+       }
        return true;
 }
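
Both added quirks key on the PCI device ID plus the subsystem vendor/device IDs to tell one board apart from another that carries the same GPU. The if-chains could equally be expressed as a table of fixups; the sketch below is a hypothetical alternative, with only the Asrock RS600 IDs taken from the hunk and everything else made up.

#include <stdint.h>

struct quirk {
        uint16_t device;
        uint16_t subsystem_vendor;
        uint16_t subsystem_device;
        void (*fixup)(int *connector_type);
};

static void hdmi_is_really_dvi(int *connector_type)
{
        *connector_type = 3;            /* stand-in for DRM_MODE_CONNECTOR_DVID */
}

static const struct quirk quirks[] = {
        { 0x7941, 0x1849, 0x7941, hdmi_is_really_dvi },   /* Asrock RS600 */
};

static void apply_quirks(uint16_t dev, uint16_t subven, uint16_t subdev,
                         int *connector_type)
{
        unsigned i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (quirks[i].device == dev &&
                    quirks[i].subsystem_vendor == subven &&
                    quirks[i].subsystem_device == subdev)
                        quirks[i].fixup(connector_type);
}

The driver keeps the open-coded checks, presumably because each quirk also gates on the supported device or the reported connector type, which does not fit a flat table as cleanly.
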
 
index 4ddfd4b..7932dc4 100644 (file)
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
        if (r) {
                goto out_cleanup;
        }
-       start_jiffies = jiffies;
-       for (i = 0; i < n; i++) {
-               r = radeon_fence_create(rdev, &fence);
-               if (r) {
-                       goto out_cleanup;
+
+       /* r100 doesn't have a DMA engine, so skip the test */
+       if (rdev->asic->copy_dma) {
+
+               start_jiffies = jiffies;
+               for (i = 0; i < n; i++) {
+                       r = radeon_fence_create(rdev, &fence);
+                       if (r) {
+                               goto out_cleanup;
+                       }
+
+                       r = radeon_copy_dma(rdev, saddr, daddr,
+                                       size / RADEON_GPU_PAGE_SIZE, fence);
+
+                       if (r) {
+                               goto out_cleanup;
+                       }
+                       r = radeon_fence_wait(fence, false);
+                       if (r) {
+                               goto out_cleanup;
+                       }
+                       radeon_fence_unref(&fence);
                }
-               r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
-               if (r) {
-                       goto out_cleanup;
+               end_jiffies = jiffies;
+               time = end_jiffies - start_jiffies;
+               time = jiffies_to_msecs(time);
+               if (time > 0) {
+                       i = ((n * size) >> 10) / time;
+                       printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+                                       " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+                                       n, size >> 10,
+                                       sdomain, ddomain, time,
+                                       i, i * 1000, (i * 1000) / 1024);
                }
-               r = radeon_fence_wait(fence, false);
-               if (r) {
-                       goto out_cleanup;
-               }
-               radeon_fence_unref(&fence);
-       }
-       end_jiffies = jiffies;
-       time = end_jiffies - start_jiffies;
-       time = jiffies_to_msecs(time);
-       if (time > 0) {
-               i = ((n * size) >> 10) / time;
-               printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
-                      " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
-                      sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
        }
+
        start_jiffies = jiffies;
        for (i = 0; i < n; i++) {
                r = radeon_fence_create(rdev, &fence);
index 2d8e5a7..65f8194 100644 (file)
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder;
        struct drm_encoder_helper_funcs *encoder_funcs;
-       bool dret;
+       bool dret = false;
        enum drm_connector_status ret = connector_status_disconnected;
 
        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
                ret = connector_status_disconnected;
 
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-       dret = radeon_ddc_probe(radeon_connector);
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+       if (radeon_connector->ddc_bus) {
+               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+               dret = radeon_ddc_probe(radeon_connector);
+               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+       }
        if (dret) {
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
        struct drm_mode_object *obj;
        int i;
        enum drm_connector_status ret = connector_status_disconnected;
-       bool dret;
+       bool dret = false;
 
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-       dret = radeon_ddc_probe(radeon_connector);
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+       if (radeon_connector->ddc_bus) {
+               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+               dret = radeon_ddc_probe(radeon_connector);
+               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+       }
        if (dret) {
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
@@ -776,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
                         * connected and the DVI port disconnected.  If the edid doesn't
                         * say HDMI, vice versa.
                         */
-                       if (radeon_connector->shared_ddc && connector_status_connected) {
+                       if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
                                struct drm_device *dev = connector->dev;
                                struct drm_connector *list_connector;
                                struct radeon_connector *list_radeon_connector;
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                        return;
                }
                if (radeon_connector->ddc_bus && i2c_bus->valid) {
-                       if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
-                                   sizeof(struct radeon_i2c_bus_rec)) == 0) {
+                       if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
                                radeon_connector->shared_ddc = true;
                                shared_ddc = true;
                        }
index 1190148..e9d0850 100644 (file)
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                                                &p->validated);
                }
        }
-       return radeon_bo_list_validate(&p->validated, p->ib->fence);
+       return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
        unsigned i;
 
-       if (error && parser->ib) {
-               radeon_bo_list_unvalidate(&parser->validated,
-                                               parser->ib->fence);
-       } else {
-               radeon_bo_list_unreserve(&parser->validated);
+       if (!error && parser->ib) {
+               radeon_bo_list_fence(&parser->validated, parser->ib->fence);
        }
+       radeon_bo_list_unreserve(&parser->validated);
        for (i = 0; i < parser->nrelocs; i++) {
                if (parser->relocs[i].gobj) {
                        mutex_lock(&parser->rdev->ddev->struct_mutex);
index 6a92f99..7e17a36 100644 (file)
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
                DRM_INFO("  %s\n", connector_names[connector->connector_type]);
                if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
                        DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
-               if (radeon_connector->ddc_bus)
+               if (radeon_connector->ddc_bus) {
                        DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                                 radeon_connector->ddc_bus->rec.mask_clk_reg,
                                 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                 radeon_connector->ddc_bus->rec.en_data_reg,
                                 radeon_connector->ddc_bus->rec.y_clk_reg,
                                 radeon_connector->ddc_bus->rec.y_data_reg);
+               } else {
+                       if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+                           connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+                           connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+                           connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+                           connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+                           connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+                               DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+               }
                DRM_INFO("  Encoders:\n");
                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                        radeon_encoder = to_radeon_encoder(encoder);
index e137852..c57ad60 100644 (file)
  * 1.29- R500 3D cmd buffer support
  * 1.30- Add support for occlusion queries
  * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
  */
 #define DRIVER_MAJOR           1
-#define DRIVER_MINOR           31
+#define DRIVER_MINOR           32
 #define DRIVER_PATCHLEVEL      0
 
 enum radeon_cp_microcode_version {
index 3ba213d..d71e346 100644 (file)
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
        if (ret)
                goto out_unref;
 
-       memset_io(fbptr, 0xff, aligned_size);
+       memset_io(fbptr, 0x0, aligned_size);
 
        strcpy(info->fix.id, "radeondrmfb");
 
index d72a71b..f1da370 100644 (file)
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
        }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       struct radeon_fence *old_fence = NULL;
        int r;
 
        r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
-               if (fence) {
-                       old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                       bo->tbo.sync_obj = radeon_fence_ref(fence);
-                       bo->tbo.sync_obj_arg = NULL;
-               }
-               if (old_fence) {
-                       radeon_fence_unref(&old_fence);
-               }
        }
        return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
        struct radeon_bo_list *lobj;
-       struct radeon_fence *old_fence;
-
-       if (fence)
-               list_for_each_entry(lobj, head, list) {
-                       old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                       if (old_fence == fence) {
-                               lobj->bo->tbo.sync_obj = NULL;
-                               radeon_fence_unref(&old_fence);
-                       }
+       struct radeon_bo *bo;
+       struct radeon_fence *old_fence = NULL;
+
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               spin_lock(&bo->tbo.lock);
+               old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+               bo->tbo.sync_obj = radeon_fence_ref(fence);
+               bo->tbo.sync_obj_arg = NULL;
+               spin_unlock(&bo->tbo.lock);
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
                }
-       radeon_bo_list_unreserve(head);
+       }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
index a02f180..7ab43de 100644 (file)
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
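
radeon_bo_list_unvalidate() gives way to radeon_bo_list_fence(): instead of speculatively fencing buffers during validation and unwinding on error, the fence is attached to every validated buffer only once the command stream has been accepted. The underlying idiom is to swap the reference while holding the per-object lock and drop the old reference outside it; a generic sketch with hypothetical refcounting helpers:

#include <pthread.h>

struct fence;                                   /* opaque, refcounted */
extern struct fence *fence_ref(struct fence *f);
extern void fence_unref(struct fence **f);

struct buffer {
        pthread_mutex_t lock;
        struct fence *sync_obj;
};

static void buffer_attach_fence(struct buffer *bo, struct fence *newf)
{
        struct fence *old;

        pthread_mutex_lock(&bo->lock);
        old = bo->sync_obj;
        bo->sync_obj = fence_ref(newf);         /* take our own reference */
        pthread_mutex_unlock(&bo->lock);

        if (old)
                fence_unref(&old);              /* drop the old ref outside the lock */
}
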
index 4d12b2d..6579eb4 100644 (file)
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
        struct radeon_fence *fence;
        struct radeon_ib *nib;
-       unsigned long i;
-       int r = 0;
+       int r = 0, i, c;
 
        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
-               DRM_ERROR("failed to create fence for new IB\n");
+               dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-       if (i < RADEON_IB_POOL_SIZE) {
-               set_bit(i, rdev->ib_pool.alloc_bm);
-               rdev->ib_pool.ibs[i].length_dw = 0;
-               *ib = &rdev->ib_pool.ibs[i];
-               mutex_unlock(&rdev->ib_pool.mutex);
-               goto out;
+       for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+               i &= (RADEON_IB_POOL_SIZE - 1);
+               if (rdev->ib_pool.ibs[i].free) {
+                       nib = &rdev->ib_pool.ibs[i];
+                       break;
+               }
        }
-       if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-               /* we go do nothings here */
+       if (nib == NULL) {
+               /* This should never happen: it means every IB is allocated
+                * and none has been scheduled yet. Return EBUSY to userspace
+                * and hope the ioctl is retried with better luck.
+                */
+               dev_err(rdev->dev, "no free indirect buffer!\n");
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("all IB allocated none scheduled.\n");
-               r = -EINVAL;
-               goto out;
+               radeon_fence_unref(&fence);
+               return -EBUSY;
        }
-       /* get the first ib on the scheduled list */
-       nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-                        struct radeon_ib, list);
-       if (nib->fence == NULL) {
-               /* we go do nothings here */
+       rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+       nib->free = false;
+       if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-               r = -EINVAL;
-               goto out;
-       }
-       mutex_unlock(&rdev->ib_pool.mutex);
-
-       r = radeon_fence_wait(nib->fence, false);
-       if (r) {
-               DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-                         (unsigned long)nib->gpu_addr, nib->length_dw);
-               DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-               goto out;
+               r = radeon_fence_wait(nib->fence, false);
+               if (r) {
+                       dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+                               nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+                       mutex_lock(&rdev->ib_pool.mutex);
+                       nib->free = true;
+                       mutex_unlock(&rdev->ib_pool.mutex);
+                       radeon_fence_unref(&fence);
+                       return r;
+               }
+               mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
-
+       nib->fence = fence;
        nib->length_dw = 0;
-
-       /* scheduled list is accessed here */
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_del(&nib->list);
-       INIT_LIST_HEAD(&nib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
-
        *ib = nib;
-out:
-       if (r) {
-               radeon_fence_unref(&fence);
-       } else {
-               (*ib)->fence = fence;
-       }
-       return r;
+       return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        if (tmp == NULL) {
                return;
        }
-       mutex_lock(&rdev->ib_pool.mutex);
-       if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-               /* IB is scheduled & not signaled don't do anythings */
-               mutex_unlock(&rdev->ib_pool.mutex);
-               return;
-       }
-       list_del(&tmp->list);
-       INIT_LIST_HEAD(&tmp->list);
-       if (tmp->fence)
+       if (!tmp->fence->emited)
                radeon_fence_unref(&tmp->fence);
-
-       tmp->length_dw = 0;
-       clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+       mutex_lock(&rdev->ib_pool.mutex);
+       tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: Nothings in the ib we should report. */
-               DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+               DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }
 
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+       /* once scheduled, an IB is considered free again and is protected by its fence */
+       ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
        if (rdev->ib_pool.robj)
                return 0;
        /* Allocate 1M object buffer */
-       INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
        r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
                                true, RADEON_GEM_DOMAIN_GTT,
                                &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
-               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+               rdev->ib_pool.ibs[i].free = true;
        }
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        if (rdev->ib_pool.robj) {
                r = radeon_bo_reserve(rdev->ib_pool.robj, false);
                if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
        if (ib == NULL) {
                return 0;
        }
-       seq_printf(m, "IB %04lu\n", ib->idx);
+       seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
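
Taken together, the radeon_ring.c hunks drop the allocation bitmap and the scheduled_ibs list in favour of a plain ring: every IB carries a free flag, head_id remembers where the last allocation happened, and indices wrap with a power-of-two mask, so a scheduled IB becomes reusable as soon as its fence protects it. A compact userspace sketch of the free-slot scan; the structures and names are illustrative, not the driver's.

#include <stdbool.h>
#include <stddef.h>

#define POOL_SIZE 16                    /* must be a power of two */

struct ib {
        unsigned idx;
        bool free;
};

struct ib_pool {
        struct ib ibs[POOL_SIZE];
        unsigned head_id;               /* where the next scan starts */
};

/* Return the first free slot at or after head_id, or NULL if all are busy. */
static struct ib *ib_pool_get(struct ib_pool *pool)
{
        unsigned i = pool->head_id;
        unsigned c;

        for (c = 0; c < POOL_SIZE; c++, i++) {
                i &= POOL_SIZE - 1;     /* cheap wrap-around */
                if (pool->ibs[i].free) {
                        pool->ibs[i].free = false;
                        pool->head_id = (i + 1) & (POOL_SIZE - 1);
                        return &pool->ibs[i];
                }
        }
        return NULL;                    /* caller maps this to -EBUSY */
}

The real radeon_ib_get() additionally waits on the slot's previous fence before reusing it and returns -EBUSY when the scan finds nothing.
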
index 5943d56..0302167 100644 (file)
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        gb_tiling_config |= BANK_SWAPS(1);
 
-       backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-                                                       rdev->config.rv770.max_backends,
-                                                       (0xff << rdev->config.rv770.max_backends) & 0xff);
+       if (rdev->family == CHIP_RV740)
+               backend_map = 0x28;
+       else
+               backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+                                                               rdev->config.rv770.max_backends,
+                                                               (0xff << rdev->config.rv770.max_backends) & 0xff);
        gb_tiling_config |= BACKEND_MAP(backend_map);
 
        cc_gc_shader_pipe_config =
index 1a3e909..c7320ce 100644 (file)
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
 {
        int i;
+       struct drm_mm_node *node = mem->mm_node;
+
+       if (node && placement->lpfn != 0 &&
+           (node->start < placement->fpfn ||
+            node->start + node->size > placement->lpfn))
+               return -1;
 
        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
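
The added test makes ttm_bo_mem_compat() reject a node that is already placed outside the caller's requested page-frame window, forcing a move even when the memory type and caching flags would otherwise match. The check itself is plain interval containment; in isolation, with only the field names borrowed from the hunk:

#include <stdbool.h>

/* true if [start, start + size) lies within [fpfn, lpfn) */
static bool node_in_window(unsigned long start, unsigned long size,
                           unsigned long fpfn, unsigned long lpfn)
{
        return start >= fpfn && start + size <= lpfn;
}

In the hunk, lpfn == 0 means "no upper bound", hence the extra placement->lpfn != 0 guard before the comparison.
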
index e2123af..3d47a2c 100644 (file)
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_state)
+                                         enum ttm_caching_state c_old,
+                                         enum ttm_caching_state c_new)
 {
        int ret = 0;
 
        if (PageHighMem(p))
                return 0;
 
-       if (get_page_memtype(p) != -1) {
+       if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
 
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
                        return ret;
        }
 
-       if (c_state == tt_wc)
+       if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
-       else if (c_state == tt_uncached)
+       else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);
 
        return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_state)
+                                         enum ttm_caching_state c_old,
+                                         enum ttm_caching_state c_new)
 {
        return 0;
 }
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
-                       ret = ttm_tt_set_page_caching(cur_page, c_state);
+                       ret = ttm_tt_set_page_caching(cur_page,
+                                                     ttm->caching_state,
+                                                     c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
@@ -268,7 +272,7 @@ out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
-                       (void)ttm_tt_set_page_caching(cur_page,
+                       (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }
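
ttm_tt_set_page_caching() now receives both the page's current caching state and the requested one, instead of asking get_page_memtype() (which is not available in every configuration) whether the page is still in the default state. A rough sketch of the resulting shape, with hypothetical setters standing in for set_memory_wc()/set_pages_uc():

enum caching { CACHED, WRITE_COMBINED, UNCACHED };

extern int set_cached(void *page);              /* hypothetical helpers */
extern int set_write_combined(void *page);
extern int set_uncached(void *page);

static int set_page_caching(void *page, enum caching c_old, enum caching c_new)
{
        int ret = 0;

        /* go back to the default state first so the old mapping is released */
        if (c_old != CACHED)
                ret = set_cached(page);
        if (ret)
                return ret;

        if (c_new == WRITE_COMBINED)
                ret = set_write_combined(page);
        else if (c_new == UNCACHED)
                ret = set_uncached(page);

        return ret;
}
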
index a6e8f68..0c9c081 100644 (file)
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                 */
 
                DRM_INFO("It appears like vesafb is loaded. "
-                        "Ignore above error if any. Entering stealth mode.\n");
+                        "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-       } else {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       goto out_no_device;
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-               vmw_fb_init(dev_priv);
        }
+       ret = vmw_request_device(dev_priv);
+       if (unlikely(ret != 0))
+               goto out_no_device;
+       vmw_kms_init(dev_priv);
+       vmw_overlay_init(dev_priv);
+       vmw_fb_init(dev_priv);
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       if (!dev_priv->stealth) {
-               vmw_fb_close(dev_priv);
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
-               vmw_release_device(dev_priv);
-               pci_release_regions(dev->pdev);
-       } else {
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
+       vmw_fb_close(dev_priv);
+       vmw_kms_close(dev_priv);
+       vmw_overlay_close(dev_priv);
+       vmw_release_device(dev_priv);
+       if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
-       }
+       else
+               pci_release_regions(dev->pdev);
+
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
        int ret = 0;
 
        DRM_INFO("Master set.\n");
-       if (dev_priv->stealth) {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
 
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-       if (dev_priv->stealth) {
-               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-               if (unlikely(ret != 0))
-                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
-               vmw_release_device(dev_priv);
-       }
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       if (!dev_priv->stealth)
-               vmw_fb_on(dev_priv);
+       vmw_fb_on(dev_priv);
 }
 
 
index 135be96..356dc93 100644 (file)
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
        unsigned long static_buffer_size;
        bool using_bounce_buffer;
        uint32_t capabilities;
+       struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
 };
 
@@ -213,7 +214,7 @@ struct vmw_private {
         * Fencing and IRQs.
         */
 
-       uint32_t fence_seq;
+       atomic_t fence_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
        atomic_t fence_queue_waiters;
index d69caf9..0897359 100644 (file)
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-                      struct vmw_sw_context *sw_context,
-                      SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGAGuestPtr *ptr,
+                                  struct vmw_dma_buffer **vmw_bo_p)
 {
-       uint32_t handle;
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
-       struct vmw_surface *srf = NULL;
-       struct vmw_dma_cmd {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdSurfaceDMA dma;
-       } *cmd;
+       uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
-       int ret;
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;
+       int ret;
 
-       cmd = container_of(header, struct vmw_dma_cmd, header);
-       handle = cmd->dma.guest.ptr.gmrId;
        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        bo = &vmw_bo->base;
 
        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-               DRM_ERROR("Max number of DMA commands per submission"
+               DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }
 
        reloc = &sw_context->relocs[sw_context->cur_reloc++];
-       reloc->location = &cmd->dma.guest.ptr;
+       reloc->location = ptr;
 
        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }
+       *vmw_bo_p = vmw_bo;
+       return 0;
+
+out_no_reloc:
+       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_p = NULL;
+       return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+                            struct vmw_sw_context *sw_context,
+                            SVGA3dCmdHeader *header)
+{
+       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdEndQuery q;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_query_cmd, header);
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                     &cmd->q.guestResult,
+                                     &vmw_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_dmabuf_unreference(&vmw_bo);
+       return 0;
+}
 
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+                             struct vmw_sw_context *sw_context,
+                             SVGA3dCmdHeader *header)
+{
+       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdWaitForQuery q;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_query_cmd, header);
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                     &cmd->q.guestResult,
+                                     &vmw_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_dmabuf_unreference(&vmw_bo);
+       return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+                      struct vmw_sw_context *sw_context,
+                      SVGA3dCmdHeader *header)
+{
+       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct ttm_buffer_object *bo;
+       struct vmw_surface *srf = NULL;
+       struct vmw_dma_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSurfaceDMA dma;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_dma_cmd, header);
+       ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+                                     &cmd->dma.guest.ptr,
+                                     &vmw_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       bo = &vmw_bo->base;
        ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                             cmd->dma.host.sid, &srf);
        if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+       VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
index 4f4f643..a933670 100644 (file)
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        info->pixmap.scan_align = 1;
 #endif
 
+       info->aperture_base = vmw_priv->vram_start;
+       info->aperture_size = vmw_priv->vram_size;
+
        /*
         * Dirty & Deferred IO
         */
index 4157547..39d43a0 100644 (file)
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;
 
+       mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);
 
        /*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);
 
-       dev_priv->fence_seq = dev_priv->last_read_sequence;
+       atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
        iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
        return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;
 
-       down_write(&fifo_state->rwsem);
+       mutex_lock(&fifo_state->fifo_mutex);
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
        }
 out_err:
        fifo_state->reserved_size = 0;
-       up_write(&fifo_state->rwsem);
+       mutex_unlock(&fifo_state->fifo_mutex);
        return NULL;
 }
 
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
        }
 
+       down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
        if (reserveable)
                iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        up_write(&fifo_state->rwsem);
+       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+       mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
-               down_write(&fifo_state->rwsem);
-               *sequence = dev_priv->fence_seq;
-               up_write(&fifo_state->rwsem);
+               *sequence = atomic_read(&dev_priv->fence_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
                                        false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
        }
 
        do {
-               *sequence = dev_priv->fence_seq++;
+               *sequence = atomic_add_return(1, &dev_priv->fence_seq);
        } while (*sequence == 0);
 
        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
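
fence_seq becomes an atomic_t so readers no longer need the FIFO rwsem, and the emit loop keeps incrementing until it produces a non-zero value, zero being reserved for "no fence". The same counter expressed with C11 atomics, as a standalone sketch:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint fence_seq;

/* Return the next non-zero fence sequence number. */
static uint32_t fence_next(void)
{
        uint32_t seq;

        do {
                /* fetch_add returns the old value, so add 1 to get the new one,
                 * mirroring the kernel's atomic_add_return() */
                seq = atomic_fetch_add(&fence_seq, 1) + 1;
        } while (seq == 0);             /* 0 is reserved for "no fence" */

        return seq;
}
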
index 778851f..1c7a316 100644 (file)
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_OFFSET:
                param->value = dev_priv->mmio_start;
                break;
+       case DRM_VMW_PARAM_HW_CAPS:
+               param->value = dev_priv->capabilities;
+               break;
+       case DRM_VMW_PARAM_FIFO_CAPS:
+               param->value = dev_priv->fifo.capabilities;
+               break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
index d40086f..4d7cb53 100644 (file)
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
                return true;
 
        /**
-        * Below is to signal stale fences that have wrapped.
-        * First, block fence submission.
-        */
-
-       down_read(&fifo_state->rwsem);
-
-       /**
         * Then check if the sequence is higher than what we've actually
         * emitted. Then the fence is stale and signaled.
         */
 
-       ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-       up_read(&fifo_state->rwsem);
+       ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+              > VMW_FENCE_WRAP);
 
        return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
        if (fifo_idle)
                down_read(&fifo_state->rwsem);
-       signal_seq = dev_priv->fence_seq;
+       signal_seq = atomic_read(&dev_priv->fence_seq);
        ret = 0;
 
        for (;;) {
index eeba6d1..31f9afe 100644 (file)
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 
        drm_mode_config_init(dev);
        dev->mode_config.funcs = &vmw_kms_funcs;
-       dev->mode_config.min_width = 640;
-       dev->mode_config.min_height = 480;
-       dev->mode_config.max_width = 2048;
-       dev->mode_config.max_height = 2048;
+       dev->mode_config.min_width = 1;
+       dev->mode_config.min_height = 1;
+       dev->mode_config.max_width = dev_priv->fb_max_width;
+       dev->mode_config.max_height = dev_priv->fb_max_height;
 
        ret = vmw_kms_init_legacy_display_system(dev_priv);
 
index c7efbd4..f8fbbc6 100644 (file)
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 
-/* XXX: This isn't a real hardware flag, but just a hack for kernel to
- * know about primary surfaces. Find a better way to accomplish this.
- */
-#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
-
 struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
        srf->flags = req->flags;
        srf->format = req->format;
+       srf->scanout = req->scanout;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0))
                goto out_err1;
 
-       if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
-               /* we should not send this flag down to hardware since
-                * its not a official one
-                */
-               srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
-               srf->scanout = true;
-       } else {
-               srf->scanout = false;
-       }
-
        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
index 1ac0c93..2f6cf69 100644 (file)
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                remaining -= 7;
                pr_devel("client 0x%p called 'target'\n", priv);
                /* if target is default */
-               if (!strncmp(buf, "default", 7))
+               if (!strncmp(curr_pos, "default", 7))
                        pdev = pci_dev_get(vga_default_device());
                else {
                        if (!vga_pci_str_to_vars(curr_pos, remaining,
index b1c050f..e29b6d5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/types.h>
 
 /* include interfaces to usb layer */
 #include <linux/usb.h>
@@ -31,8 +32,8 @@
 #define CMD_I2C_IO_END         (1<<1)
 
 /* i2c bit delay, default is 10us -> 100kHz */
-static int delay = 10;
-module_param(delay, int, 0);
+static unsigned short delay = 10;
+module_param(delay, ushort, 0);
 MODULE_PARM_DESC(delay, "bit delay in microseconds, "
                 "e.g. 10 for 100kHz (default is 100kHz)");
 
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 
 static u32 usb_func(struct i2c_adapter *adapter)
 {
-       u32 func;
+       __le32 func;
 
        /* get functionality from adapter */
        if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
                return 0;
        }
 
-       return func;
+       return le32_to_cpu(func);
 }
 
 /* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
                 "i2c-tiny-usb at bus %03d device %03d",
                 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
 
-       if (usb_write(&dev->adapter, CMD_SET_DELAY,
-                     cpu_to_le16(delay), 0, NULL, 0) != 0) {
+       if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
                dev_err(&dev->adapter.dev,
                        "failure setting delay to %dus\n", delay);
                retval = -EIO;
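
The i2c-tiny-usb fixes are about wire format: the functionality word arrives little-endian and is now declared __le32 and converted once with le32_to_cpu(), and the delay parameter is passed through as a plain 16-bit value, presumably because the USB core already converts the control-request value field, so the extra cpu_to_le16() amounted to a double swap. A small host-side illustration of the read path, using glibc's le32toh() in place of le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* bytes exactly as they arrive from the device, least significant first */
        uint8_t wire[4] = { 0x0d, 0x00, 0xee, 0x0e };
        uint32_t raw;

        memcpy(&raw, wire, sizeof(raw));
        printf("functionality word: 0x%08x\n", le32toh(raw));
        return 0;
}
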
index dd0db67..975adce 100644 (file)
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
 
 config INFINIBAND_USER_ACCESS
        tristate "InfiniBand userspace access (verbs and CM)"
+       select ANON_INODES
        ---help---
          Userspace InfiniBand access support.  This enables the
          kernel side of userspace verbs and the userspace
index cc9b594..875e34e 100644 (file)
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
        if (ret)
                goto err1;
 
-       if (cma_loopback_addr(addr)) {
-               ret = cma_bind_loopback(id_priv);
-       } else if (!cma_zero_addr(addr)) {
+       if (!cma_any_addr(addr)) {
                ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
                if (ret)
                        goto err1;
index b3ea958..0b38620 100644 (file)
@@ -145,7 +145,7 @@ extern struct idr ib_uverbs_srq_idr;
 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
 
 struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
-                                       int is_async, int *fd);
+                                       int is_async);
 struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
 
 void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
index 112d397..f71cf13 100644 (file)
@@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 
        resp.num_comp_vectors = file->device->num_comp_vectors;
 
-       filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
+       ret = get_unused_fd();
+       if (ret < 0)
+               goto err_free;
+       resp.async_fd = ret;
+
+       filp = ib_uverbs_alloc_event_file(file, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
-               goto err_free;
+               goto err_fd;
        }
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
        return in_len;
 
 err_file:
-       put_unused_fd(resp.async_fd);
        fput(filp);
 
+err_fd:
+       put_unused_fd(resp.async_fd);
+
 err_free:
        ibdev->dealloc_ucontext(ucontext);
 
@@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct file                               *filp;
+       int ret;
 
        if (out_len < sizeof resp)
                return -ENOSPC;
@@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
-       if (IS_ERR(filp))
+       ret = get_unused_fd();
+       if (ret < 0)
+               return ret;
+       resp.fd = ret;
+
+       filp = ib_uverbs_alloc_event_file(file, 0);
+       if (IS_ERR(filp)) {
+               put_unused_fd(resp.fd);
                return PTR_ERR(filp);
+       }
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
index 5f284ff..810f277 100644 (file)
@@ -41,8 +41,8 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
+#include <linux/anon_inodes.h>
 #include <linux/file.h>
-#include <linux/mount.h>
 #include <linux/cdev.h>
 
 #include <asm/uaccess.h>
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand userspace verbs access");
 MODULE_LICENSE("Dual BSD/GPL");
 
-#define INFINIBANDEVENTFS_MAGIC        0x49426576      /* "IBev" */
-
 enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
@@ -111,8 +109,6 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
        [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
 };
 
-static struct vfsmount *uverbs_event_mnt;
-
 static void ib_uverbs_add_one(struct ib_device *device);
 static void ib_uverbs_remove_one(struct ib_device *device);
 
@@ -489,12 +485,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
 }
 
 struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
-                                       int is_async, int *fd)
+                                       int is_async)
 {
        struct ib_uverbs_event_file *ev_file;
-       struct path path;
        struct file *filp;
-       int ret;
 
        ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
        if (!ev_file)
@@ -509,38 +503,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
        ev_file->is_async    = is_async;
        ev_file->is_closed   = 0;
 
-       *fd = get_unused_fd();
-       if (*fd < 0) {
-               ret = *fd;
-               goto err;
-       }
-
-       /*
-        * fops_get() can't fail here, because we're coming from a
-        * system call on a uverbs file, which will already have a
-        * module reference.
-        */
-       path.mnt = uverbs_event_mnt;
-       path.dentry = uverbs_event_mnt->mnt_root;
-       path_get(&path);
-       filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
-       if (!filp) {
-               ret = -ENFILE;
-               goto err_fd;
-       }
-
-       filp->private_data = ev_file;
+       filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
+                                 ev_file, O_RDONLY);
+       if (IS_ERR(filp))
+               kfree(ev_file);
 
        return filp;
-
-err_fd:
-       fops_put(&uverbs_event_fops);
-       path_put(&path);
-       put_unused_fd(*fd);
-
-err:
-       kfree(ev_file);
-       return ERR_PTR(ret);
 }
 
 /*
@@ -825,21 +793,6 @@ static void ib_uverbs_remove_one(struct ib_device *device)
        kfree(uverbs_dev);
 }
 
-static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
-                              const char *dev_name, void *data,
-                              struct vfsmount *mnt)
-{
-       return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
-                            INFINIBANDEVENTFS_MAGIC, mnt);
-}
-
-static struct file_system_type uverbs_event_fs = {
-       /* No owner field so module can be unloaded */
-       .name    = "infinibandeventfs",
-       .get_sb  = uverbs_event_get_sb,
-       .kill_sb = kill_litter_super
-};
-
 static int __init ib_uverbs_init(void)
 {
        int ret;
@@ -864,33 +817,14 @@ static int __init ib_uverbs_init(void)
                goto out_class;
        }
 
-       ret = register_filesystem(&uverbs_event_fs);
-       if (ret) {
-               printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
-               goto out_class;
-       }
-
-       uverbs_event_mnt = kern_mount(&uverbs_event_fs);
-       if (IS_ERR(uverbs_event_mnt)) {
-               ret = PTR_ERR(uverbs_event_mnt);
-               printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
-               goto out_fs;
-       }
-
        ret = ib_register_client(&uverbs_client);
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't register client\n");
-               goto out_mnt;
+               goto out_class;
        }
 
        return 0;
 
-out_mnt:
-       mntput(uverbs_event_mnt);
-
-out_fs:
-       unregister_filesystem(&uverbs_event_fs);
-
 out_class:
        class_destroy(uverbs_class);
 
@@ -904,8 +838,6 @@ out:
 static void __exit ib_uverbs_cleanup(void)
 {
        ib_unregister_client(&uverbs_client);
-       mntput(uverbs_event_mnt);
-       unregister_filesystem(&uverbs_event_fs);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
        idr_destroy(&ib_uverbs_pd_idr);
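
The dedicated infinibandeventfs pseudo-filesystem disappears: event files are now plain anonymous inodes from anon_inode_getfile(), and descriptor allocation moves out into the callers, which reserve the fd before creating the file and release it with put_unused_fd() if anything later fails. Below is a condensed, hypothetical caller-side sketch of that ordering; it is not the literal uverbs code, and in the real driver the fd is presumably only installed once the response has been copied to userspace.

static int create_event_fd(struct ib_uverbs_file *uverbs_file, int is_async)
{
        struct file *filp;
        int fd;

        fd = get_unused_fd();                   /* reserve the descriptor first */
        if (fd < 0)
                return fd;

        filp = ib_uverbs_alloc_event_file(uverbs_file, is_async);
        if (IS_ERR(filp)) {
                put_unused_fd(fd);              /* nothing was installed yet */
                return PTR_ERR(filp);
        }

        fd_install(fd, filp);                   /* commit: fd now owns filp */
        return fd;
}
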
index aa6713b..291d939 100644 (file)
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
        struct input_polled_dev *dev = input_get_drvdata(input);
 
        cancel_delayed_work_sync(&dev->work);
+       /*
+        * Reinitialize the work struct to drop its reference to the
+        * workqueue, which may be destroyed by the next call. A stale
+        * reference causes problems on the next device open/close when
+        * poll_interval == 0.
+        */
+       INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
        input_polldev_stop_workqueue();
 
        if (dev->close)
index 9774bdf..d8c0c8d 100644 (file)
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio)
                psmouse_deactivate(parent);
        }
 
-       psmouse_deactivate(psmouse);
+       psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
+
+       /*
+        * Disable stream mode so cleanup routine can proceed undisturbed.
+        */
+       if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
+               printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
+                       psmouse->ps2dev.serio->phys);
 
        if (psmouse->cleanup)
                psmouse->cleanup(psmouse);
index d84a36e..b54aee7 100644 (file)
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
        return 0;
 }
 
+static int i8042_pm_thaw(struct device *dev)
+{
+       i8042_interrupt(0, NULL);
+
+       return 0;
+}
+
 static const struct dev_pm_ops i8042_pm_ops = {
        .suspend        = i8042_pm_reset,
        .resume         = i8042_pm_restore,
+       .thaw           = i8042_pm_thaw,
        .poweroff       = i8042_pm_reset,
        .restore        = i8042_pm_restore,
 };
index 09a5e73..5256123 100644 (file)
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
 static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 {
-       dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ;
-       dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ;
+       dev->x = (pkt[2] << 8) | pkt[1];
+       dev->y = (pkt[4] << 8) | pkt[3];
        dev->press = pkt[5] & 0xff;
        dev->touch = pkt[0] & 0x01;
 
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
        [DEVTYPE_GENERAL_TOUCH] = {
                .min_xc         = 0x0,
-               .max_xc         = 0x0500,
+               .max_xc         = 0x7fff,
                .min_yc         = 0x0,
-               .max_yc         = 0x0500,
+               .max_yc         = 0x7fff,
                .rept_size      = 7,
                .read_data      = general_touch_read_data,
        },
index 54abf9e..f1c8cae 100644 (file)
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 {
        int r = 0;
        size_t dummy = 0;
-       int overhead_size =
-               sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+       int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
        struct dm_ulog_request *tfr = prealloced_ulog_tfr;
        struct receiving_pkg pkg;
 
+       /*
+        * Given the space needed to hold the 'struct cn_msg' and
+        * 'struct dm_ulog_request' - do we have enough payload
+        * space remaining?
+        */
        if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
                DMINFO("Size of tfr exceeds preallocated size");
                return -EINVAL;
@@ -191,7 +195,7 @@ resend:
         */
        mutex_lock(&dm_ulog_lock);
 
-       memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+       memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
        memcpy(tfr->uuid, uuid, DM_UUID_LEN);
        tfr->luid = luid;
        tfr->seq = dm_ulog_seq++;
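
The hunk above corrects the overhead calculation (sizeof(struct dm_ulog_request) rather than the size of a pointer to it) and clears only the request portion of the preallocated buffer. Below is a minimal user-space sketch of the same bounds check; the structure names and sizes are stand-ins for the kernel types, not the real definitions.

        #include <stdio.h>

        /* Stand-ins for the kernel structures; sizes are illustrative only. */
        struct cn_msg_stub      { char hdr[20]; };
        struct dm_ulog_req_stub { char hdr[40]; };

        #define PREALLOCED_SIZE 2048    /* assumed preallocated transfer buffer */

        static int check_payload(size_t data_size)
        {
                /* Overhead must be the size of the structs themselves,
                 * not the size of a pointer to one of them. */
                size_t overhead = sizeof(struct cn_msg_stub) +
                                  sizeof(struct dm_ulog_req_stub);

                if (data_size > PREALLOCED_SIZE - overhead) {
                        fprintf(stderr, "payload of %zu bytes does not fit\n", data_size);
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                printf("%d\n", check_payload(100));    /* fits */
                printf("%d\n", check_payload(4096));   /* rejected */
                return 0;
        }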
index ad779bd..6c1046d 100644 (file)
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
        /*
         * Dispatch io.
         */
-       if (unlikely(ms->log_failure)) {
+       if (unlikely(ms->log_failure) && errors_handled(ms)) {
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->failures, &sync);
                spin_unlock_irq(&ms->lock);
index 5f19ceb..168bd38 100644 (file)
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
        spin_lock_irq(&rh->region_lock);
        if (success)
                list_add(&reg->list, &reg->rh->recovered_regions);
-       else {
-               reg->state = DM_RH_NOSYNC;
+       else
                list_add(&reg->list, &reg->rh->failed_recovered_regions);
-       }
+
        spin_unlock_irq(&rh->region_lock);
 
        rh->wakeup_workers(rh->context);
index 7d08879..c097d8a 100644 (file)
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
-       INIT_WORK(&req.work, do_metadata);
+       INIT_WORK_ON_STACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);
 
index e0efc1a..bd58703 100644 (file)
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        stripes = simple_strtoul(argv[0], &end, 10);
-       if (*end) {
+       if (!stripes || *end) {
                ti->error = "Invalid stripe count";
                return -EINVAL;
        }
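
The one-line change above also rejects a stripe count of zero, not just trailing garbage after the number. A small sketch of the parsing pattern, independent of device-mapper:

        #include <stdio.h>
        #include <stdlib.h>

        /* Parse a stripe count: must be a positive decimal number with no
         * trailing characters. Returns 0 on success, -1 on error. */
        static int parse_stripes(const char *arg, unsigned long *out)
        {
                char *end;
                unsigned long stripes = strtoul(arg, &end, 10);

                if (end == arg || *end || !stripes)
                        return -1;      /* empty, trailing junk, or zero */

                *out = stripes;
                return 0;
        }

        int main(void)
        {
                unsigned long n;
                printf("%d\n", parse_stripes("4", &n));    /*  0 */
                printf("%d\n", parse_stripes("0", &n));    /* -1 */
                printf("%d\n", parse_stripes("4x", &n));   /* -1 */
                return 0;
        }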
index f53392d..f91b409 100644 (file)
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
 };
 
 /*
- * The sysfs structure is embedded in md struct, nothing to do here
- */
-static void dm_sysfs_release(struct kobject *kobj)
-{
-}
-
-/*
  * dm kobject is embedded in mapped_device structure
  * no need to define release function here
  */
 static struct kobj_type dm_ktype = {
        .sysfs_ops      = &dm_sysfs_ops,
        .default_attrs  = dm_attrs,
-       .release        = dm_sysfs_release
 };
 
 /*
index 3167480..aa4e2aa 100644 (file)
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
        return BLKPREP_OK;
 }
 
-static void map_request(struct dm_target *ti, struct request *clone,
-                       struct mapped_device *md)
+/*
+ * Returns:
+ * 0  : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
+                      struct mapped_device *md)
 {
-       int r;
+       int r, requeued = 0;
        struct dm_rq_target_io *tio = clone->end_io_data;
 
        /*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_unmapped_request(clone);
+               requeued = 1;
                break;
        default:
                if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
                dm_kill_unmapped_request(clone, r);
                break;
        }
+
+       return requeued;
 }
 
 /*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
                atomic_inc(&md->pending[rq_data_dir(clone)]);
 
                spin_unlock(q->queue_lock);
-               map_request(ti, clone, md);
+               if (map_request(ti, clone, md))
+                       goto requeued;
+
                spin_lock_irq(q->queue_lock);
        }
 
        goto out;
 
+requeued:
+       spin_lock_irq(q->queue_lock);
+
 plug_and_out:
        if (!elv_queue_empty(q))
                /* Some requests still remain, retry later */
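
With this change map_request reports whether the clone was requeued, so dm_request_fn can re-take the queue lock on the requeue path instead of falling through as if the request had been dispatched. A compressed, user-space sketch of that control flow, with the locking reduced to comments and map_one() a hypothetical stand-in for the target's decision:

        #include <stdio.h>

        enum map_result { MAPPED = 0, REQUEUED = 1 };

        /* Hypothetical stand-in: pretend request 2 gets requeued. */
        static enum map_result map_one(int reqno)
        {
                return (reqno == 2) ? REQUEUED : MAPPED;
        }

        static void dispatch(void)
        {
                for (int reqno = 0; reqno < 4; reqno++) {
                        /* ...dequeue and account with the lock held... */
                        /* drop the lock while mapping */
                        if (map_one(reqno) == REQUEUED) {
                                /* re-take the lock, then stop dispatching */
                                printf("request %d requeued, plug and out\n", reqno);
                                return;
                        }
                        /* re-take the lock and continue the loop */
                        printf("request %d dispatched\n", reqno);
                }
        }

        int main(void) { dispatch(); return 0; }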
index dd3dfe4..a20a71e 100644 (file)
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
 {
        mddev_t *mddev = container_of(ws, mddev_t, del_work);
 
-       if (mddev->private == &md_redundancy_group) {
+       if (mddev->private) {
                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+               if (mddev->private != (void*)1)
+                       sysfs_remove_group(&mddev->kobj, mddev->private);
                if (mddev->sysfs_action)
                        sysfs_put(mddev->sysfs_action);
                mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
                sysfs_notify_dirent(rdev->sysfs_state);
        }
 
-       md_probe(mddev->unit, NULL, NULL);
        disk = mddev->gendisk;
-       if (!disk)
-               return -ENOMEM;
 
        spin_lock(&pers_lock);
        pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                        mddev->queue->unplug_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
                        module_put(mddev->pers->owner);
-                       if (mddev->pers->sync_request)
-                               mddev->private = &md_redundancy_group;
+                       if (mddev->pers->sync_request && mddev->private == NULL)
+                               mddev->private = (void*)1;
                        mddev->pers = NULL;
                        /* tell userspace to handle 'inactive' */
                        sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
                }
                mddev->bitmap_info.offset = 0;
 
-               /* make sure all md_delayed_delete calls have finished */
-               flush_scheduled_work();
-
                export_array(mddev);
 
                mddev->array_sectors = 0;
index e84204e..ceb24af 100644 (file)
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        mddev->queue->backing_dev_info.congested_fn = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
        free_conf(conf);
-       mddev->private = NULL;
+       mddev->private = &raid5_attrs_group;
        return 0;
 }
 
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
                    !test_bit(Faulty, &rdev->flags)) {
                        if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
-                               if (rdev->raid_disk >= conf->previous_raid_disks)
+                               if (rdev->raid_disk >= conf->previous_raid_disks) {
                                        set_bit(In_sync, &rdev->flags);
-                               else
+                                       added_devices++;
+                               } else
                                        rdev->recovery_offset = 0;
-                               added_devices++;
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                if (sysfs_create_link(&mddev->kobj,
                                                      &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
                                break;
                }
 
+       /* When a reshape changes the number of devices, ->degraded
+        * is measured against the larger of the pre and post number of
+        * devices. */
        if (mddev->delta_disks > 0) {
                spin_lock_irqsave(&conf->device_lock, flags);
-               mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+               mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
                        - added_devices;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
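
The switch from '=' to '+=' matters when the array was already degraded before the reshape: the pre-existing shortfall has to be preserved on top of the new slots the reshape adds but cannot immediately populate. A back-of-the-envelope check with illustrative numbers (not taken from the patch):

        #include <stdio.h>

        int main(void)
        {
                int previous_raid_disks = 4;    /* assumed pre-reshape width */
                int raid_disks          = 6;    /* post-reshape width */
                int already_degraded    = 1;    /* one member was missing before */
                int added_devices       = 1;    /* only one new slot became In_sync */

                /* '=' would forget the member that was already missing: */
                int wrong = (raid_disks - previous_raid_disks) - added_devices;

                /* '+=' keeps it: */
                int right = already_degraded +
                            (raid_disks - previous_raid_disks) - added_devices;

                printf("wrong=%d right=%d\n", wrong, right);   /* wrong=1 right=2 */
                return 0;
        }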
index c37790a..9ddc579 100644 (file)
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
-       INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
        init_timer(&dmxdevfilter->timer);
 
        dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
        dmxdevfilter->type = DMXDEV_TYPE_PES;
        memcpy(&dmxdevfilter->params, params,
               sizeof(struct dmx_pes_filter_params));
+       INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
 
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
 
index b78cfb7..67f189b 100644 (file)
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                };
        };
 
-       if (dvb_demux_tscheck) {
-               if (!demux->cnt_storage)
-                       demux->cnt_storage = vmalloc(MAX_PID + 1);
-
-               if (!demux->cnt_storage) {
-                       printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
-                       dvb_demux_tscheck = 0;
-                       goto no_dvb_demux_tscheck;
-               }
-
+       if (demux->cnt_storage) {
                /* check pkt counter */
                if (pid < MAX_PID) {
                        if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                };
                /* end check */
        };
-no_dvb_demux_tscheck:
 
        list_for_each_entry(feed, &demux->feed_list, list_head) {
                if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
        dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
        if (!dvbdemux->feed) {
                vfree(dvbdemux->filter);
+               dvbdemux->filter = NULL;
                return -ENOMEM;
        }
        for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
                dvbdemux->feed[i].index = i;
        }
 
+       if (dvb_demux_tscheck) {
+               dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+
+               if (!dvbdemux->cnt_storage)
+                       printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+       }
+
        INIT_LIST_HEAD(&dvbdemux->frontend_list);
 
        for (i = 0; i < DMX_TS_PES_OTHER; i++) {
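
With these hunks the TS/TEI counter storage is allocated once in dvb_dmx_init, and the per-packet path only tests the pointer, so a failed allocation simply disables the check. The general pattern, sketched in user space with calloc standing in for vmalloc and a trimmed-down demux structure:

        #include <stdio.h>
        #include <stdlib.h>

        #define MAX_PID 0x1fff

        struct demux {
                unsigned char *cnt_storage;     /* optional diagnostic state */
        };

        static void demux_init(struct demux *d, int tscheck_enabled)
        {
                d->cnt_storage = NULL;
                if (tscheck_enabled) {
                        d->cnt_storage = calloc(MAX_PID + 1, 1);
                        if (!d->cnt_storage)
                                fprintf(stderr, "no memory for TS/TEI check, disabling it\n");
                }
        }

        static void filter_packet(struct demux *d, unsigned pid)
        {
                if (d->cnt_storage && pid < MAX_PID)
                        d->cnt_storage[pid]++;  /* counter check only if available */
                /* ...normal filtering continues either way... */
        }

        int main(void)
        {
                struct demux d;
                demux_init(&d, 1);
                filter_packet(&d, 0x100);
                free(d.cnt_storage);
                return 0;
        }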
index 1b24989..465295b 100644 (file)
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB
        select DVB_MT352 if !DVB_FE_CUSTOMISE
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
        select DVB_DIB7000P if !DVB_FE_CUSTOMISE
-       select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
        select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
+       select DVB_ATBM8830 if !DVB_FE_CUSTOMISE
+       select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
        select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
        select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
        select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
+       select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE
        help
          Say Y here to support the Conexant USB2.0 hybrid reference design.
          Currently, only DVB and ATSC modes are supported, analog mode
index 3051b64..445fa10 100644 (file)
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
        spi_bias *= qam_tab[p->constellation];
        spi_bias /= p->code_rate_HP + 1;
        spi_bias /= (guard_tab[p->guard_interval] + 32);
-       spi_bias *= 1000ULL;
-       spi_bias /= 1000ULL + ppm/1000;
+       spi_bias *= 1000;
+       spi_bias /= 1000 + ppm/1000;
        spi_bias *= p->code_rate_HP;
 
        val0x04 = (p->transmission_mode << 2) | p->guard_interval;
index 3182a40..ae08b07 100644 (file)
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
                request_modules(btv);
        }
 
+       init_bttv_i2c_ir(btv);
        bttv_input_init(btv);
 
        /* everything is fine */
index 63aa31a..407fa61 100644 (file)
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv)
        if (0 == btv->i2c_rc && i2c_scan)
                do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
 
-       /* Instantiate the IR receiver device, if present */
+       return btv->i2c_rc;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
        if (0 == btv->i2c_rc) {
                struct i2c_board_info info;
                /* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv)
                strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
                i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
        }
-       return btv->i2c_rc;
 }
 
 int __devexit fini_bttv_i2c(struct bttv *btv)
index a1d0e9c..6cccc2a 100644 (file)
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
 extern unsigned int bttv_gpio;
 extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
 extern int init_bttv_i2c(struct bttv *btv);
+extern void init_bttv_i2c_ir(struct bttv *btv);
 extern int fini_bttv_i2c(struct bttv *btv);
 
 #define bttv_printk if (bttv_verbose) printk
index fc4dd60..7438f8d 100644 (file)
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
        /* poll to verify out of standby. Must Poll this bit */
        for (i = 0; i < 100; i++) {
                mt9t112_reg_read(data, client, 0x0018);
-               if (0x4000 & data)
+               if (!(0x4000 & data))
                        break;
 
                mdelay(10);
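
The inverted test above makes the loop wait for the standby bit (0x4000) to clear rather than to be set. The polling idiom in isolation, with a stubbed register read so the sketch is runnable:

        #include <stdio.h>

        /* Hypothetical register read: bit 0x4000 clears after a few polls. */
        static unsigned read_status(void)
        {
                static int polls;
                return (++polls < 3) ? 0x4000 : 0x0000;
        }

        int main(void)
        {
                int i;
                unsigned data = 0;

                for (i = 0; i < 100; i++) {
                        data = read_status();
                        if (!(data & 0x4000))   /* out of standby: bit cleared */
                                break;
                        /* the driver sleeps 10ms here; omitted in the sketch */
                }

                if (data & 0x4000)
                        fprintf(stderr, "timed out waiting for standby exit\n");
                else
                        printf("out of standby after %d polls\n", i + 1);
                return 0;
        }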
index 50b415e..f7f7e04 100644 (file)
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
                buf[0] = 0xff; /* fixed */
 
        ret = send_control_msg(pdev,
-               SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf));
+               SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
 
        if (!mode && ret >= 0) {
                if (value < 0)
index 5775275..81279b3 100644 (file)
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
                dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
                   "Command not in the active list! (sc=%p)\n", ioc->name,
                   SCpnt));
-               retval = 0;
+               retval = SUCCESS;
                goto out;
        }
 
index b9f1e84..e7f8027 100644 (file)
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
        }
 
        mrq->cmd->arg = dev_addr;
+       if (!mmc_card_blockaddr(test->card))
+               mrq->cmd->arg <<= 9;
+
        mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 
        if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
        }
 
        for (i = 0;i < BUFFER_SIZE / 512;i++) {
-               ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+               ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
                if (ret)
                        return ret;
        }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
        memset(test->buffer, 0, 512);
 
        for (i = 0;i < BUFFER_SIZE / 512;i++) {
-               ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+               ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
                if (ret)
                        return ret;
        }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
                for (i = 0;i < sectors;i++) {
                        ret = mmc_test_buffer_transfer(test,
                                test->buffer + i * 512,
-                               dev_addr + i * 512, 512, 0);
+                               dev_addr + i, 512, 0);
                        if (ret)
                                return ret;
                }
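
Both sets of hunks move the test code to sector units: the helpers now pass a sector index, and mmc_test_prepare_mrq converts it to a byte address only for cards that are not block-addressed. A small sketch of that conversion, assuming 512-byte sectors:

        #include <stdio.h>

        /* Build the command argument for a data transfer.
         * Block-addressed (high-capacity) cards take a sector number;
         * byte-addressed cards take sector * 512. */
        static unsigned long long cmd_arg(unsigned sector, int card_is_blockaddr)
        {
                unsigned long long arg = sector;

                if (!card_is_blockaddr)
                        arg <<= 9;              /* 512-byte sectors */
                return arg;
        }

        int main(void)
        {
                printf("%llu\n", cmd_arg(8, 1));   /* 8    : block-addressed card */
                printf("%llu\n", cmd_arg(8, 0));   /* 4096 : byte-addressed card */
                return 0;
        }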
index 62d9c9c..1dd4403 100644 (file)
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
                size = (res->end - res->start) + 1;
 
                ax->mem2 = request_mem_region(res->start, size, pdev->name);
-               if (ax->mem == NULL) {
+               if (ax->mem2 == NULL) {
                        dev_err(&pdev->dev, "cannot reserve registers\n");
                        ret = -ENXIO;
                        goto exit_mem1;
index fee6eee..006cb2e 100644 (file)
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+       req_hdr->version = 0;
 }
 
 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
index bdbd147..318a018 100644 (file)
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
                         struct sge_fl *fl, int len, int complete)
 {
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+       struct port_info *pi = netdev_priv(qs->netdev);
        struct sk_buff *skb = NULL;
        struct cpl_rx_pkt *cpl;
        struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 
        if (!nr_frags) {
                offset = 2 + sizeof(struct cpl_rx_pkt);
-               qs->lro_va = sd->pg_chunk.va + 2;
-       }
-       len -= offset;
+               cpl = qs->lro_va = sd->pg_chunk.va + 2;
 
-       prefetch(qs->lro_va);
+               if ((pi->rx_offload & T3_RX_CSUM) &&
+                    cpl->csum_valid && cpl->csum == htons(0xffff)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+               } else
+                       skb->ip_summed = CHECKSUM_NONE;
+       } else
+               cpl = qs->lro_va;
+
+       len -= offset;
 
        rx_frag += nr_frags;
        rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
                return;
 
        skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       cpl = qs->lro_va;
 
        if (unlikely(cpl->vlan_valid)) {
-               struct net_device *dev = qs->netdev;
-               struct port_info *pi = netdev_priv(dev);
                struct vlan_group *grp = pi->vlan_grp;
 
                if (likely(grp != NULL)) {
index d29bb53..7655436 100644 (file)
@@ -4006,11 +4006,21 @@ check_page:
                        }
                }
 
-               if (!buffer_info->dma)
+               if (!buffer_info->dma) {
                        buffer_info->dma = pci_map_page(pdev,
                                                        buffer_info->page, 0,
                                                        buffer_info->length,
                                                        PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                               put_page(buffer_info->page);
+                               dev_kfree_skb(skb);
+                               buffer_info->page = NULL;
+                               buffer_info->skb = NULL;
+                               buffer_info->dma = 0;
+                               adapter->alloc_rx_buff_failed++;
+                               break; /* while !buffer_info->skb */
+                       }
+               }
 
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ map_skb:
                                                  skb->data,
                                                  buffer_info->length,
                                                  PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                       dev_kfree_skb(skb);
+                       buffer_info->skb = NULL;
+                       buffer_info->dma = 0;
+                       adapter->alloc_rx_buff_failed++;
+                       break; /* while !buffer_info->skb */
+               }
 
                /*
                 * XXX if it was allocated cleanly it will never map to a
index 997124d..c881347 100644 (file)
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+               if (!adapter->msix_entries && msix_vector == 0)
+                       msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
-       struct e1000_hw *hw = &adapter->hw;
        int err = 0;
 
        if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
-               switch (hw->mac.type) {
-               case e1000_82575:
-                       wr32(E1000_MSIXBM(0),
-                            (E1000_EICR_RX_QUEUE0 |
-                             E1000_EICR_TX_QUEUE0 |
-                             E1000_EIMS_OTHER));
-                       break;
-               case e1000_82580:
-               case e1000_82576:
-                       wr32(E1000_IVAR0, E1000_IVAR_VALID);
-                       break;
-               default:
-                       break;
-               }
+               igb_assign_vector(adapter->q_vector[0], 0);
        }
 
        if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
+       else
+               igb_assign_vector(adapter->q_vector[0], 0);
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
index 3103f41..35a06b4 100644 (file)
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
+       u32 link_speed = 0;
+       bool link_up;
 
 #ifdef CONFIG_DCB
        if (hw->fc.requested_mode == ixgbe_fc_pfc)
                goto out;
 
 #endif /* CONFIG_DCB */
+       /*
+        * On 82598, having Rx FC on causes resets while doing 1G,
+        * so if it's on, turn it off once we know link_speed. For
+        * more details see the 82598 Specification Update.
+        */
+       hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+       if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+               switch (hw->fc.requested_mode) {
+               case ixgbe_fc_full:
+                       hw->fc.requested_mode = ixgbe_fc_tx_pause;
+                       break;
+               case ixgbe_fc_rx_pause:
+                       hw->fc.requested_mode = ixgbe_fc_none;
+                       break;
+               default:
+                       /* no change */
+                       break;
+               }
+       }
+
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
        if (ret_val)
index b5f64ad..951b73c 100644 (file)
@@ -5179,7 +5179,7 @@ dma_error:
                ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
        }
 
-       return count;
+       return 0;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               while (unlikely(txq >= dev->real_num_tx_queues))
+                       txq -= dev->real_num_tx_queues;
                return txq;
+       }
 
 #ifdef IXGBE_FCOE
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
+       /* Make it possible for the adapter to be woken up via WOL */
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
        /*
         * If there is a fan on this device and it has failed log the
         * failure.
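
The select-queue hunk above handles the case where smp_processor_id() exceeds the number of real Tx queues by folding the CPU id back into range before returning it. A standalone sketch of the same wrap-around, mirroring the subtraction loop used in the patch:

        #include <stdio.h>

        /* Fold an arbitrary CPU id into [0, num_tx_queues). */
        static unsigned pick_txq(unsigned cpu, unsigned num_tx_queues)
        {
                unsigned txq = cpu;

                while (txq >= num_tx_queues)
                        txq -= num_tx_queues;
                return txq;
        }

        int main(void)
        {
                printf("%u\n", pick_txq(3, 8));    /* 3 */
                printf("%u\n", pick_txq(13, 8));   /* 5 */
                return 0;
        }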
index 9f9d608..24279e6 100644 (file)
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
                netif_wake_queue(adapter->netdev);
 
                clear_bit(__NX_RESETTING, &adapter->state);
-
+               return;
        } else {
                clear_bit(__NX_RESETTING, &adapter->state);
                if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
 
        netxen_nic_down(adapter, netdev);
 
+       rtnl_lock();
        netxen_nic_detach(adapter);
+       rtnl_unlock();
 
        status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
 
index 103e8b0..46997e1 100644 (file)
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
  fail2:
        efx_fini_struct(efx);
  fail1:
+       WARN_ON(rc > 0);
        EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
index bf0b96a..5712fdd 100644 (file)
 #define FALCON_BOARD_SFN4111T 0x51
 #define FALCON_BOARD_SFN4112F 0x52
 
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. */
+#define FALCON_BOARD_TEMP_BIAS 15
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MAX   90
+
 /*****************************************************************************
  * Support for LM87 sensor chip used on several boards
  */
@@ -548,16 +557,16 @@ fail_hwmon:
 static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
 
 static const u8 sfe4002_lm87_regs[] = {
-       LM87_IN_LIMITS(0, 0x83, 0x91),          /* 2.5V:  1.8V +/- 5% */
-       LM87_IN_LIMITS(1, 0x51, 0x5a),          /* Vccp1: 1.2V +/- 5% */
-       LM87_IN_LIMITS(2, 0xb6, 0xca),          /* 3.3V:  3.3V +/- 5% */
-       LM87_IN_LIMITS(3, 0xb0, 0xc9),          /* 5V:    4.6-5.2V */
-       LM87_IN_LIMITS(4, 0xb0, 0xe0),          /* 12V:   11-14V */
-       LM87_IN_LIMITS(5, 0x44, 0x4b),          /* Vccp2: 1.0V +/- 5% */
-       LM87_AIN_LIMITS(0, 0xa0, 0xb2),         /* AIN1:  1.66V +/- 5% */
-       LM87_AIN_LIMITS(1, 0x91, 0xa1),         /* AIN2:  1.5V +/- 5% */
-       LM87_TEMP_INT_LIMITS(10, 60),           /* board */
-       LM87_TEMP_EXT1_LIMITS(10, 70),          /* Falcon */
+       LM87_IN_LIMITS(0, 0x7c, 0x99),          /* 2.5V:  1.8V +/- 10% */
+       LM87_IN_LIMITS(1, 0x4c, 0x5e),          /* Vccp1: 1.2V +/- 10% */
+       LM87_IN_LIMITS(2, 0xac, 0xd4),          /* 3.3V:  3.3V +/- 10% */
+       LM87_IN_LIMITS(3, 0xac, 0xd4),          /* 5V:    5.0V +/- 10% */
+       LM87_IN_LIMITS(4, 0xac, 0xe0),          /* 12V:   10.8-14V */
+       LM87_IN_LIMITS(5, 0x3f, 0x4f),          /* Vccp2: 1.0V +/- 10% */
+       LM87_AIN_LIMITS(0, 0x98, 0xbb),         /* AIN1:  1.66V +/- 10% */
+       LM87_AIN_LIMITS(1, 0x8a, 0xa9),         /* AIN2:  1.5V +/- 10% */
+       LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+       LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
        0
 };
 
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
 static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
 
 static const u8 sfn4112f_lm87_regs[] = {
-       LM87_IN_LIMITS(0, 0x83, 0x91),          /* 2.5V:  1.8V +/- 5% */
-       LM87_IN_LIMITS(1, 0x51, 0x5a),          /* Vccp1: 1.2V +/- 5% */
-       LM87_IN_LIMITS(2, 0xb6, 0xca),          /* 3.3V:  3.3V +/- 5% */
-       LM87_IN_LIMITS(4, 0xb0, 0xe0),          /* 12V:   11-14V */
-       LM87_IN_LIMITS(5, 0x44, 0x4b),          /* Vccp2: 1.0V +/- 5% */
-       LM87_AIN_LIMITS(1, 0x91, 0xa1),         /* AIN2:  1.5V +/- 5% */
-       LM87_TEMP_INT_LIMITS(10, 60),           /* board */
-       LM87_TEMP_EXT1_LIMITS(10, 70),          /* Falcon */
+       LM87_IN_LIMITS(0, 0x7c, 0x99),          /* 2.5V:  1.8V +/- 10% */
+       LM87_IN_LIMITS(1, 0x4c, 0x5e),          /* Vccp1: 1.2V +/- 10% */
+       LM87_IN_LIMITS(2, 0xac, 0xd4),          /* 3.3V:  3.3V +/- 10% */
+       LM87_IN_LIMITS(4, 0xac, 0xe0),          /* 12V:   10.8-14V */
+       LM87_IN_LIMITS(5, 0x3f, 0x4f),          /* Vccp2: 1.0V +/- 10% */
+       LM87_AIN_LIMITS(1, 0x8a, 0xa9),         /* AIN2:  1.5V +/- 10% */
+       LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+       LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
        0
 };
 
index 9f035b9..f66b3da 100644 (file)
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
        efx_dword_t reg;
 
        /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
-       rc = efx_mcdi_poll_reboot(efx);
+       rc = -efx_mcdi_poll_reboot(efx);
        if (rc)
                goto out;
 
index e0d13a4..67eec7a 100644 (file)
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
 
        falcon_board(efx)->type->init_phy(efx);
 
-       return rc;
+       return 0;
 
  fail:
        EFX_ERR(efx, "PHY reset timed out\n");
index d760650..67249c3 100644 (file)
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
 {
        struct sky2_tx_le *le = sky2->tx_le + *slot;
-       struct tx_ring_info *re = sky2->tx_ring + *slot;
 
        *slot = RING_NEXT(*slot, sky2->tx_ring_size);
-       re->flags = 0;
-       re->skb = NULL;
        le->ctrl = 0;
        return le;
 }
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
        return count;
 }
 
-static void sky2_tx_unmap(struct pci_dev *pdev,
-                         const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
 {
        if (re->flags & TX_MAP_SINGLE)
                pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
                pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
                               pci_unmap_len(re, maplen),
                               PCI_DMA_TODEVICE);
+       re->flags = 0;
 }
 
 /*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
 
+                       re->skb = NULL;
                        dev_kfree_skb_any(skb);
 
                        sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
index 75a669d..d71c197 100644 (file)
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
                /* Transmit complete. */
                lp->lstats.tx_ints++;
                tc35815_txdone(dev);
-               netif_wake_queue(dev);
                if (ret < 0)
                        ret = 0;
        }
index 4f27f02..5f3b9ea 100644 (file)
@@ -584,6 +584,11 @@ static const struct usb_device_id  products [] = {
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long) &mbm_info,
 }, {
+       /* Ericsson C3607w ver 2 */
+       USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &mbm_info,
+}, {
        /* Toshiba F3507g */
        USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
index c93f58f..317aa34 100644 (file)
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
 /**
  *     tx_srv          -       transmit interrupt service
 *     @vptr: velocity
- *     @status:
  *
  *     Scan the queues looking for transmitted packets that
  *     we can complete and clean up. Update any statistics as
 *     necessary.
  */
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
 {
        struct tx_desc *td;
        int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 /**
  *     velocity_rx_srv         -       service RX interrupt
  *     @vptr: velocity
- *     @status: adapter status (unused)
  *
  *     Walk the receive ring of the velocity adapter and remove
  *     any received packets from the receive queue. Hand the ring
  *     slots back to the adapter for reuse.
  */
-static int velocity_rx_srv(struct velocity_info *vptr, int status,
-               int budget_left)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
 {
        struct net_device_stats *stats = &vptr->dev->stats;
        int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
        struct velocity_info *vptr = container_of(napi,
                        struct velocity_info, napi);
        unsigned int rx_done;
-       u32 isr_status;
-
-       spin_lock(&vptr->lock);
-       isr_status = mac_read_isr(vptr->mac_regs);
-
-       /* Ack the interrupt */
-       mac_write_isr(vptr->mac_regs, isr_status);
-       if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
-               velocity_error(vptr, isr_status);
+       unsigned long flags;
 
+       spin_lock_irqsave(&vptr->lock, flags);
        /*
         * Do rx and tx twice for performance (taken from the VIA
         * out-of-tree driver).
         */
-       rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
-       velocity_tx_srv(vptr, isr_status);
-       rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
-       velocity_tx_srv(vptr, isr_status);
-
-       spin_unlock(&vptr->lock);
+       rx_done = velocity_rx_srv(vptr, budget / 2);
+       velocity_tx_srv(vptr);
+       rx_done += velocity_rx_srv(vptr, budget - rx_done);
+       velocity_tx_srv(vptr);
 
        /* If budget not fully consumed, exit the polling mode */
        if (rx_done < budget) {
                napi_complete(napi);
                mac_enable_int(vptr->mac_regs);
        }
+       spin_unlock_irqrestore(&vptr->lock, flags);
 
        return rx_done;
 }
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
                return IRQ_NONE;
        }
 
+       /* Ack the interrupt */
+       mac_write_isr(vptr->mac_regs, isr_status);
+
        if (likely(napi_schedule_prep(&vptr->napi))) {
                mac_disable_int(vptr->mac_regs);
                __napi_schedule(&vptr->napi);
        }
+
+       if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+               velocity_error(vptr, isr_status);
+
        spin_unlock(&vptr->lock);
 
        return IRQ_HANDLED;
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
        mac_disable_int(vptr->mac_regs);
 
-       velocity_tx_srv(vptr, 0);
+       velocity_tx_srv(vptr);
 
        for (i = 0; i < vptr->tx.numq; i++) {
                if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
 {
        struct velocity_info *vptr = netdev_priv(dev);
        int max_us = 0x3f * 64;
+       unsigned long flags;
 
        /* 6 bits of  */
        if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
                        ecmd->tx_coalesce_usecs);
 
        /* Setup the interrupt suppression and queue timers */
+       spin_lock_irqsave(&vptr->lock, flags);
        mac_disable_int(vptr->mac_regs);
        setup_adaptive_interrupts(vptr);
        setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
        mac_clear_isr(vptr->mac_regs);
        mac_enable_int(vptr->mac_regs);
+       spin_unlock_irqrestore(&vptr->lock, flags);
 
        return 0;
 }
index fa12b90..29bf336 100644 (file)
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
                bf->bf_frmlen -= padsize;
        }
 
-       if (conf_is_ht(&hw->conf) && !is_pae(skb))
+       if (conf_is_ht(&hw->conf))
                bf->bf_state.bf_type |= BUF_HT;
 
        bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                        goto tx_done;
                }
 
-               if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+               if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
                        /*
                         * Try aggregation if it's a unicast data frame
                         * and the destination is HT capable.
index fe3bf94..c484cc2 100644 (file)
 #define B43_MMIO_TSF_2                 0x636   /* core rev < 3 only */
 #define B43_MMIO_TSF_3                 0x638   /* core rev < 3 only */
 #define B43_MMIO_RNG                   0x65A
+#define B43_MMIO_IFSSLOT               0x684   /* Interframe slot time */
 #define B43_MMIO_IFSCTL                        0x688 /* Interframe space control */
 #define  B43_MMIO_IFSCTL_USE_EDCF      0x0004
 #define B43_MMIO_POWERUP_DELAY         0x6A8
index 4c41cfe..490fb45 100644 (file)
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
 static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
 {
        /* slot_time is in usec. */
-       if (dev->phy.type != B43_PHYTYPE_G)
+       /* This test used to exit for all but a G PHY. */
+       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
                return;
-       b43_write16(dev, 0x684, 510 + slot_time);
-       b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+       b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
+       /* Shared memory location 0x0010 is the slot time and should be
+        * set to slot_time; however, this register is initially 0 and changing
+        * the value adversely affects the transmit rate for BCM4311
+        * devices. Until this behavior is understood, delete this step
+        *
+        * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+        */
 }
 
 static void b43_short_slot_timing_enable(struct b43_wldev *dev)
index 9b4b8b5..3146281 100644 (file)
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
                        IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
                                           "%d index %d\n", scd_ssn , index);
                        freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-                       priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
                        if (priv->mac80211_registered &&
                            (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
index de45f30..cffaae7 100644 (file)
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
                                        scd_ssn , index, txq_id, txq->swq_id);
 
                        freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-                       priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
                        if (priv->mac80211_registered &&
                            (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
                                   tx_resp->failure_frame);
 
                freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-               if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-                       priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+               iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
                if (priv->mac80211_registered &&
                    (iwl_queue_space(&txq->q) > txq->q.low_mark))
                        iwl_wake_queue(priv, txq_id);
        }
 
-       if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-               iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+       iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
        if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
                IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
index 5461f10..f36f804 100644 (file)
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
                        priv->staging_rxon.flags = 0;
 
                iwl_set_rxon_channel(priv, conf->channel);
+               iwl_set_rxon_ht(priv, ht_conf);
 
                iwl_set_flags_for_band(priv, conf->channel->band);
                spin_unlock_irqrestore(&priv->lock, flags);
index 27ca859..b69e972 100644 (file)
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
 int iwl_hw_tx_queue_init(struct iwl_priv *priv,
                         struct iwl_tx_queue *txq);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
index 6f36b6e..2dbce85 100644 (file)
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
        if (ieee80211_is_mgmt(fc) ||
            ieee80211_has_protected(fc) ||
            ieee80211_has_morefrags(fc) ||
-           le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+           le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+           (ieee80211_is_data_qos(fc) &&
+            *ieee80211_get_qos_ctl(hdr) &
+            IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
                ret = skb_linearize(skb);
        else
                ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
index 87ce2bd..8f40715 100644 (file)
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
 
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed)
+{
+       if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+               priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+       else {
+               IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+                       priv->stations[sta_id].tid[tid].tfds_in_queue,
+                       freed);
+               priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+       }
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
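
iwl_free_tfds_in_queue refuses to let the per-TID TFD counter wrap below zero and logs when a caller tries to free more than is accounted for. The clamping helper in isolation, as a user-space sketch with an illustrative starting count:

        #include <stdio.h>

        static unsigned tfds_in_queue = 3;      /* illustrative starting count */

        static void free_tfds(unsigned freed)
        {
                if (freed <= tfds_in_queue) {
                        tfds_in_queue -= freed;
                } else {
                        fprintf(stderr, "free more than tfds_in_queue (%u:%u)\n",
                                tfds_in_queue, freed);
                        tfds_in_queue = 0;      /* clamp instead of wrapping */
                }
        }

        int main(void)
        {
                free_tfds(2);
                printf("%u\n", tfds_in_queue);  /* 1 */
                free_tfds(5);                   /* logged and clamped */
                printf("%u\n", tfds_in_queue);  /* 0 */
                return 0;
        }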
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
        struct iwl_queue *q = &txq->q;
        struct iwl_tx_info *tx_info;
        int nfreed = 0;
+       struct ieee80211_hdr *hdr;
 
        if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 
                tx_info = &txq->txb[txq->q.read_ptr];
                iwl_tx_status(priv, tx_info->skb[0]);
+
+               hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+               if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+                       nfreed++;
                tx_info->skb[0] = NULL;
 
                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
                        priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-               nfreed++;
        }
        return nfreed;
 }
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
        if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
                /* calculate mac80211 ampdu sw queue to wake */
                int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-               priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+               iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
                if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
                    priv->mac80211_registered &&
index 6d6ed74..f727b4a 100644 (file)
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
        }
 
        bss->bss = kzalloc(bss_len, GFP_KERNEL);
-       if (!bss) {
+       if (!bss->bss) {
                kfree(bss);
                IWM_ERR(iwm, "Couldn't allocate bss\n");
                return -ENOMEM;
index bc5726d..7ba3052 100644 (file)
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
        /* Sitecom */
        {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
        {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+       {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
        /* Sphairon Access Systems GmbH */
        {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
        /* Dick Smith Electronics */
index 8e952fd..cb2fd01 100644 (file)
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
                        -ret_val);
                goto acpiphp_bus_add_out;
        }
-       /*
-        * try to start anyway.  We could have failed to add
-        * simply because this bus had previously been added
-        * on another add.  Don't bother with the return value
-        * we just keep going.
-        */
        ret_val = acpi_bus_start(device);
 
 acpiphp_bus_add_out:
index 07d14df..226b3e9 100644 (file)
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
        acer_backlight_device = bd;
 
        bd->props.power = FB_BLANK_UNBLANK;
-       bd->props.brightness = max_brightness;
+       bd->props.brightness = read_brightness(bd);
        bd->props.max_brightness = max_brightness;
        backlight_update_status(bd);
        return 0;
index e67e4fe..eb603f1 100644 (file)
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
                sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-                                  &thermal_temp_input16_group);
+                                  &thermal_temp_input8_group);
                break;
        case TPACPI_THERMAL_NONE:
        default:
index fa39e75..6ea3cb5 100644 (file)
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
                dev_err(&dev->dev, "Do not pass platform_data through "
                        "wm97xx_bat_set_pdata!\n");
                return -EINVAL;
-       } else
-               pdata = wmdata->batt_pdata;
+       }
+
+       if (!wmdata) {
+               dev_err(&dev->dev, "No platform data supplied\n");
+               return -EINVAL;
+       }
+
+       pdata = wmdata->batt_pdata;
 
        if (dev->id != -1)
                return -EINVAL;
index 686ef27..b60a4c9 100644 (file)
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
 static void print_constraints(struct regulator_dev *rdev)
 {
        struct regulation_constraints *constraints = rdev->constraints;
-       char buf[80];
+       char buf[80] = "";
        int count = 0;
        int ret;
 
index 76d08c2..4f33a0f 100644 (file)
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
                if (vol_map[val] >= min_vol)
                        break;
 
-       if (vol_map[val] > max_vol)
+       if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
                return -EINVAL;
 
        return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
                if (vol_map[val] >= min_vol)
                        break;
 
-       if (vol_map[val] > max_vol)
+       if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
                return -EINVAL;
 
        ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
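
In both lp3971 hunks the search loop can run the index past the end of the voltage table when no entry satisfies min_vol, so the fix checks the index against the last valid slot before dereferencing. A small sketch of the search pattern with an illustrative table:

        #include <stdio.h>

        static const int vol_map[] = { 1000, 1100, 1200, 1300 };   /* mV, illustrative */
        #define VOL_MAX_IDX (sizeof(vol_map) / sizeof(vol_map[0]) - 1)

        /* Find the smallest table entry >= min_vol that is still <= max_vol. */
        static int pick_voltage(int min_vol, int max_vol)
        {
                size_t val;

                for (val = 0; val <= VOL_MAX_IDX; val++)
                        if (vol_map[val] >= min_vol)
                                break;

                /* Index first: without this, vol_map[val] can read past the end. */
                if (val > VOL_MAX_IDX || vol_map[val] > max_vol)
                        return -1;
                return vol_map[val];
        }

        int main(void)
        {
                printf("%d\n", pick_voltage(1150, 1300));   /* 1200 */
                printf("%d\n", pick_voltage(1400, 1500));   /* -1: nothing fits */
                return 0;
        }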
index 999fe80..62b654a 100644 (file)
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
        qdio_siga_sync_q(q);
        get_buf_state(q, q->first_to_check, &state, 0);
 
-       if (state == SLSB_P_INPUT_PRIMED)
+       if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
                /* more work coming */
                return 0;
 
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                        qdio_handle_activate_check(cdev, intparm, cstat,
                                                   dstat);
                break;
+       case QDIO_IRQ_STATE_STOPPED:
+               break;
        default:
                WARN_ON(1);
        }
index 0f7b493..271399f 100644 (file)
@@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data)
 {
        struct fc_bsg_job *job = data;
        struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
-       int status = zfcp_ct_els->status;
-       int reply_status;
+       struct fc_bsg_reply *jr = job->reply;
 
-       reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
-       job->reply->reply_data.ctels_reply.status = reply_status;
-       job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+       jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+       jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+       jr->result = zfcp_ct_els->status ? -EIO : 0;
        job->job_done(job);
 }
 
index 4775426..9e71ac6 100644 (file)
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
                if (info->scsi.phase == PHASE_IDLE)
                        fas216_kick(info);
 
-               mod_timer(&info->eh_timer, 30 * HZ);
+               mod_timer(&info->eh_timer, jiffies + 30 * HZ);
                spin_unlock_irqrestore(&info->host_lock, flags);
 
                /*
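
mod_timer takes an absolute expiry time in jiffies, so the 30-second timeout has to be added to the current jiffies value rather than passed bare. The same absolute-versus-relative distinction, shown with a monotonic clock in user space since jiffies is not available there:

        #include <stdio.h>
        #include <time.h>

        /* Build an absolute deadline "seconds" from now; an absolute deadline
         * is what a one-shot timer API comparable to mod_timer() expects. */
        static struct timespec deadline_in(long seconds)
        {
                struct timespec now;

                clock_gettime(CLOCK_MONOTONIC, &now);
                now.tv_sec += seconds;          /* now + timeout, not the bare timeout */
                return now;
        }

        int main(void)
        {
                struct timespec d = deadline_in(30);
                printf("deadline at %ld s on the monotonic clock\n", (long)d.tv_sec);
                return 0;
        }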
index 10be9f3..2f47ae7 100644 (file)
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
        fcoe_interface_cleanup(fcoe);
        rtnl_unlock();
        fcoe_if_destroy(fcoe->ctlr.lp);
+       module_put(THIS_MODULE);
+
 out_putdev:
        dev_put(netdev);
 out_nodev:
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
        }
 #endif
 
+       if (!try_module_get(THIS_MODULE)) {
+               rc = -EINVAL;
+               goto out_nomod;
+       }
+
        rtnl_lock();
        netdev = fcoe_if_to_netdev(buffer);
        if (!netdev) {
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
        if (!fcoe_link_ok(lport))
                fcoe_ctlr_link_up(&fcoe->ctlr);
 
-       rc = 0;
-out_free:
        /*
         * Release from init in fcoe_interface_create(), on success lport
         * should be holding a reference taken in fcoe_if_create().
         */
        fcoe_interface_put(fcoe);
+       dev_put(netdev);
+       rtnl_unlock();
+       mutex_unlock(&fcoe_config_mutex);
+
+       return 0;
+out_free:
+       fcoe_interface_put(fcoe);
 out_putdev:
        dev_put(netdev);
 out_nodev:
        rtnl_unlock();
+       module_put(THIS_MODULE);
+out_nomod:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
index 9823291..511cb6b 100644 (file)
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
                        next_timer = fip->ctlr_ka_time;
 
                if (time_after_eq(jiffies, fip->port_ka_time)) {
-                       fip->port_ka_time += jiffies +
+                       fip->port_ka_time = jiffies +
                                msecs_to_jiffies(FIP_VN_KA_PERIOD);
                        fip->send_port_ka = 1;
                }
index 19d711c..7f43647 100644 (file)
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);
        sp->cnt++;
 
-       if (ep->xid <= lport->lro_xid)
+       if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
                fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
 
        if (unlikely(lport->tt.frame_send(lport, fp)))
index 881d5df..6fde2fa 100644 (file)
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
 {
        struct fc_lport *lport;
 
-       if (!fsp)
-               return;
-
        lport = fsp->lp;
        if ((fsp->req_flags & FC_SRB_READ) &&
            (lport->lro_enabled) && (lport->tt.ddp_setup)) {
index 0b16502..7ec8ce7 100644 (file)
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
        u32 did;
 
        job->reply->reply_payload_rcv_len = 0;
-       rsp->resid_len = job->reply_payload.payload_len;
+       if (rsp)
+               rsp->resid_len = job->reply_payload.payload_len;
 
        mutex_lock(&lport->lp_mutex);
 
index 0230052..97923bb 100644 (file)
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 
                tov = ntohl(plp->fl_csp.sp_e_d_tov);
                if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
-                       tov /= 1000;
+                       tov /= 1000000;
                if (tov > rdata->e_d_tov)
                        rdata->e_d_tov = tov;
                csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
index db6856c..4ad87fd 100644 (file)
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
                if (r2t == NULL) {
                        if (kfifo_out(&tcp_task->r2tqueue,
                            (void *)&tcp_task->r2t, sizeof(void *)) !=
-                           sizeof(void *)) {
-                               WARN_ONCE(1, "unexpected fifo state");
+                           sizeof(void *))
                                r2t = NULL;
-                       }
-
-                       r2t = tcp_task->r2t;
+                       else
+                               r2t = tcp_task->r2t;
                }
                spin_unlock_bh(&session->lock);
        }
index 708ea31..d9b8ca5 100644 (file)
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
            compat_alloc_user_space(sizeof(struct megasas_iocpacket));
        int i;
        int error = 0;
+       compat_uptr_t ptr;
 
        if (clear_user(ioc, sizeof(*ioc)))
                return -EFAULT;
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
            copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
                return -EFAULT;
 
-       for (i = 0; i < MAX_IOCTL_SGE; i++) {
-               compat_uptr_t ptr;
+       /*
+        * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+        * sense_len is not null, so prepare the 64bit value under
+        * the same condition.
+        */
+       if (ioc->sense_len) {
+               void __user **sense_ioc_ptr =
+                       (void __user **)(ioc->frame.raw + ioc->sense_off);
+               compat_uptr_t *sense_cioc_ptr =
+                       (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+               if (get_user(ptr, sense_cioc_ptr) ||
+                   put_user(compat_ptr(ptr), sense_ioc_ptr))
+                       return -EFAULT;
+       }
 
+       for (i = 0; i < MAX_IOCTL_SGE; i++) {
                if (get_user(ptr, &cioc->sgl[i].iov_base) ||
                    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
                    copy_in_user(&ioc->sgl[i].iov_len,
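The megaraid_sas hunk above widens the 32-bit sense-buffer pointer from the compat ioctl frame into a native pointer, and does so only when sense_len says the field is actually used. A rough userspace sketch of widening a 32-bit handle under the same condition; struct compat_frame, struct native_frame and widen_sense() are invented for illustration and are not the driver's layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layouts: a frame as filled in by a 32-bit task and the
 * native frame the driver really consumes. */
struct compat_frame { uint32_t sense_ptr; uint32_t sense_len; };
struct native_frame { void *sense_ptr; uint32_t sense_len; };

static void widen_sense(const struct compat_frame *src, struct native_frame *dst)
{
        dst->sense_len = src->sense_len;
        /* Only widen the handle when the length says it is in use. */
        dst->sense_ptr = src->sense_len ?
                (void *)(uintptr_t)src->sense_ptr : NULL;
}

int main(void)
{
        /* 0x1000 stands in for a buffer address supplied by a 32-bit process. */
        struct compat_frame c = { 0x1000, 64 };
        struct native_frame n;

        widen_sense(&c, &n);
        printf("widened pointer %p, len %u\n", n.sense_ptr, n.sense_len);
        return 0;
}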
index f61fb8d..8bc6f53 100644 (file)
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
 
 #endif /* _QLA_GBL_H */
index ffd0efd..6fc63b9 100644 (file)
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
        struct rsp_que *rsp;
        struct device_reg_24xx __iomem *reg;
        struct scsi_qla_host *vha;
+       unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
        ha = rsp->hw;
        reg = &ha->iobase->isp24;
 
-       spin_lock_irq(&ha->hardware_lock);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       vha = qla25xx_get_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        if (!ha->flags.disable_msix_handshake) {
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
                RD_REG_DWORD_RELAXED(&reg->hccr);
        }
-       spin_unlock_irq(&ha->hardware_lock);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
        struct device_reg_24xx __iomem *reg;
+       unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
        /* Clear the interrupt, if enabled, for this response queue */
        if (rsp->options & ~BIT_6) {
                reg = &ha->iobase->isp24;
-               spin_lock_irq(&ha->hardware_lock);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
                RD_REG_DWORD_RELAXED(&reg->hccr);
-               spin_unlock_irq(&ha->hardware_lock);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
        queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
        uint32_t        stat;
        uint32_t        hccr;
        uint16_t        mb[4];
+       unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id)
        reg = &ha->iobase->isp24;
        status = 0;
 
-       spin_lock_irq(&ha->hardware_lock);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        do {
                stat = RD_REG_DWORD(&reg->host_status);
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
                }
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        } while (0);
-       spin_unlock_irq(&ha->hardware_lock);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
            (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
        msix->rsp = rsp;
        return ret;
 }
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
-       srb_t *sp;
-       struct qla_hw_data *ha = rsp->hw;
-       struct scsi_qla_host *vha = NULL;
-       struct sts_entry_24xx *pkt;
-       struct req_que *req;
-       uint16_t que;
-       uint32_t handle;
-
-       pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-       que = MSW(pkt->handle);
-       handle = (uint32_t) LSW(pkt->handle);
-       req = ha->req_q_map[que];
-       if (handle < MAX_OUTSTANDING_COMMANDS) {
-               sp = req->outstanding_cmds[handle];
-               if (sp)
-                       return  sp->fcport->vha;
-               else
-                       goto base_que;
-       }
-base_que:
-       vha = pci_get_drvdata(ha->pdev);
-       return vha;
-}
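The qla2xxx hunks above switch the MSI-X handlers from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore pair, so unlocking restores whatever interrupt state the caller entered with instead of unconditionally re-enabling interrupts. A small standalone sketch of why saving the prior state matters; irq_enabled, lock_irqsave() and the other names only model the behaviour and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled = true; /* models the CPU interrupt-enable flag */

/* Unconditional variant: unlock always re-enables. */
static void lock_irq(void)   { irq_enabled = false; }
static void unlock_irq(void) { irq_enabled = true; }

/* Save/restore variant: unlock puts back the state we entered with. */
static bool lock_irqsave(void)            { bool f = irq_enabled; irq_enabled = false; return f; }
static void unlock_irqrestore(bool flags) { irq_enabled = flags; }

int main(void)
{
        /* Caller already runs with interrupts off, e.g. inside another handler. */
        irq_enabled = false;
        lock_irq();
        unlock_irq();
        printf("irq variant:     interrupts %s (wrongly re-enabled)\n",
               irq_enabled ? "on" : "off");

        irq_enabled = false;
        bool flags = lock_irqsave();
        unlock_irqrestore(flags);
        printf("irqsave variant: interrupts %s (prior state preserved)\n",
               irq_enabled ? "on" : "off");
        return 0;
}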
index b901aa2..ff17dee 100644 (file)
@@ -636,13 +636,15 @@ failed:
 
 static void qla_do_work(struct work_struct *work)
 {
+       unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
+       struct qla_hw_data *ha = rsp->hw;
 
-       spin_lock_irq(&rsp->hw->hardware_lock);
-       vha = qla25xx_get_host(rsp);
+       spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+       vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
-       spin_unlock_irq(&rsp->hw->hardware_lock);
+       spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
 }
 
 /* create response queue */
index c3e37c8..e9b15c3 100644 (file)
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
 
 #define PASS_LIMIT     256
 
+#define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)
+
+
 /*
  * We default to IRQ0 for the "no irq" hack.   Some
  * machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
        up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
        spin_unlock_irqrestore(&up->port.lock, flags);
 
-       return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+       return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
 }
 
 static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
        spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
 /*
  *     Wait for transmitter & holding register to empty
  */
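The 8250 hunk above reports the transmitter empty only when both UART_LSR_TEMT and UART_LSR_THRE are set, and hoists the BOTH_EMPTY mask above its first user. A tiny sketch of the difference between testing one bit and testing the combined mask; the LSR bit values are the standard 16550 definitions, everything else is illustrative.

#include <stdio.h>

#define UART_LSR_THRE 0x20 /* transmit-hold register empty */
#define UART_LSR_TEMT 0x40 /* transmitter (shift register) empty */
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

static int tx_empty_old(unsigned int lsr) { return (lsr & UART_LSR_TEMT) != 0; }
static int tx_empty_new(unsigned int lsr) { return (lsr & BOTH_EMPTY) == BOTH_EMPTY; }

int main(void)
{
        /* Suppose a quirky UART reports the shifter empty while a byte
         * still waits in the holding register. */
        unsigned int lsr = UART_LSR_TEMT;

        printf("old check says empty: %d\n", tx_empty_old(lsr)); /* 1: premature */
        printf("new check says empty: %d\n", tx_empty_new(lsr)); /* 0: still busy */
        return 0;
}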
index 5681ebe..03dfd27 100644 (file)
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
 #endif
                        break;
                case SSB_BUSTYPE_SDIO:
-#ifdef CONFIG_SSB_SDIO
-                       sdev->irq = bus->host_sdio->dev.irq;
+#ifdef CONFIG_SSB_SDIOHOST
                        dev->parent = &bus->host_sdio->dev;
 #endif
                        break;
index 6e8bcdf..a678186 100644 (file)
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
        void __user *addr = as->userurb;
        unsigned int i;
 
-       if (as->userbuffer)
+       if (as->userbuffer && urb->actual_length)
                if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-                                urb->transfer_buffer_length))
+                                urb->actual_length))
                        goto err_out;
        if (put_user(as->status, &userurb->status))
                goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
                }
        }
 
-       free_async(as);
-
        if (put_user(addr, (void __user * __user *)arg))
                return -EFAULT;
        return 0;
 
 err_out:
-       free_async(as);
        return -EFAULT;
 }
 
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
 static int proc_reapurb(struct dev_state *ps, void __user *arg)
 {
        struct async *as = reap_as(ps);
-       if (as)
-               return processcompl(as, (void __user * __user *)arg);
+       if (as) {
+               int retval = processcompl(as, (void __user * __user *)arg);
+               free_async(as);
+               return retval;
+       }
        if (signal_pending(current))
                return -EINTR;
        return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
 
 static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
 {
+       int retval;
        struct async *as;
 
-       if (!(as = async_getcompleted(ps)))
-               return -EAGAIN;
-       return processcompl(as, (void __user * __user *)arg);
+       as = async_getcompleted(ps);
+       retval = -EAGAIN;
+       if (as) {
+               retval = processcompl(as, (void __user * __user *)arg);
+               free_async(as);
+       }
+       return retval;
 }
 
 #ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
        void __user *addr = as->userurb;
        unsigned int i;
 
-       if (as->userbuffer)
+       if (as->userbuffer && urb->actual_length)
                if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-                                urb->transfer_buffer_length))
+                                urb->actual_length))
                        return -EFAULT;
        if (put_user(as->status, &userurb->status))
                return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
                }
        }
 
-       free_async(as);
        if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
                return -EFAULT;
        return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
 static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
 {
        struct async *as = reap_as(ps);
-       if (as)
-               return processcompl_compat(as, (void __user * __user *)arg);
+       if (as) {
+               int retval = processcompl_compat(as, (void __user * __user *)arg);
+               free_async(as);
+               return retval;
+       }
        if (signal_pending(current))
                return -EINTR;
        return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
 
 static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
 {
+       int retval;
        struct async *as;
 
-       if (!(as = async_getcompleted(ps)))
-               return -EAGAIN;
-       return processcompl_compat(as, (void __user * __user *)arg);
+       retval = -EAGAIN;
+       as = async_getcompleted(ps);
+       if (as) {
+               retval = processcompl_compat(as, (void __user * __user *)arg);
+               free_async(as);
+       }
+       return retval;
 }
 
 
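The usbfs hunks above move free_async() out of processcompl() and processcompl_compat() and into the reap callers, so a completed request is freed exactly once on both the success and the copy-out failure paths. A minimal userspace sketch of that ownership split, using the hypothetical names struct job, process() and reap().

#include <stdio.h>
#include <stdlib.h>

struct job { int status; };

/* Reports the result but does NOT free: the caller owns the job. */
static int process(struct job *j)
{
        if (j->status < 0)
                return -1;      /* error path: still no free here */
        return 0;
}

static int reap(struct job *j)
{
        int ret = process(j);
        free(j);                /* single free on every path */
        return ret;
}

int main(void)
{
        struct job *ok = malloc(sizeof(*ok));
        struct job *bad = malloc(sizeof(*bad));

        if (!ok || !bad)
                return 1;
        ok->status = 0;
        bad->status = -5;

        printf("ok job:  %d\n", reap(ok));
        printf("bad job: %d\n", reap(bad));
        return 0;
}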
index 0a577d5..d4f0db5 100644 (file)
@@ -358,7 +358,7 @@ done:
         * b15:         bmType (0 == data)
         */
        len = skb->len;
-       put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
+       put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
 
        /* add a zero-length EEM packet, if needed */
        if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
                        }
 
                        /* validate CRC */
-                       crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
                        if (header & BIT(14)) {
                                crc = get_unaligned_le32(skb->data + len
                                                        - ETH_FCS_LEN);
index a37640e..77fcd1b 100644 (file)
@@ -1041,7 +1041,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
        unsigned long   rc;
 
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
-       VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+       VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
 }
 
 static int do_verify(struct fsg_common *common)
index 29dfb02..7dcdbda 100644 (file)
@@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
        unsigned long   rc;
 
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
-       VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+       VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
 }
 
 static int do_verify(struct fsg_dev *fsg)
index 4295601..76496f5 100644 (file)
@@ -29,7 +29,7 @@
 #if defined USB_ETH_RNDIS
 #  undef USB_ETH_RNDIS
 #endif
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef CONFIG_USB_G_MULTI_RNDIS
 #  define USB_ETH_RNDIS y
 #endif
 
index e220fb8..8b45145 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
index 4b5dbd0..5fc80a1 100644 (file)
@@ -2582,6 +2582,7 @@ err:
        hsotg->gadget.dev.driver = NULL;
        return ret;
 }
+EXPORT_SYMBOL(usb_gadget_register_driver);
 
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
index c75d927..1937267 100644 (file)
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
                        if (hostpc_reg) {
                                u32     t3;
 
+                               spin_unlock_irq(&ehci->lock);
                                msleep(5);/* 5ms for HCD enter low pwr mode */
+                               spin_lock_irq(&ehci->lock);
                                t3 = ehci_readl(ehci, hostpc_reg);
                                ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
                                t3 = ehci_readl(ehci, hostpc_reg);
@@ -904,17 +906,18 @@ static int ehci_hub_control (
                        if ((temp & PORT_PE) == 0
                                        || (temp & PORT_RESET) != 0)
                                goto error;
-                       ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
                        /* After above check the port must be connected.
                         * Set appropriate bit thus could put phy into low power
                         * mode if we have hostpc feature
                         */
+                       temp &= ~PORT_WKCONN_E;
+                       temp |= PORT_WKDISC_E | PORT_WKOC_E;
+                       ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
                        if (hostpc_reg) {
-                               temp &= ~PORT_WKCONN_E;
-                               temp |= (PORT_WKDISC_E | PORT_WKOC_E);
-                               ehci_writel(ehci, temp | PORT_SUSPEND,
-                                                       status_reg);
+                               spin_unlock_irqrestore(&ehci->lock, flags);
                                msleep(5);/* 5ms for HCD enter low pwr mode */
+                               spin_lock_irqsave(&ehci->lock, flags);
                                temp1 = ehci_readl(ehci, hostpc_reg);
                                ehci_writel(ehci, temp1 | HOSTPC_PHCD,
                                        hostpc_reg);
index d224ab4..e123289 100644 (file)
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
                if (ep->td_base)
                        cpm_muram_free(cpm_muram_offset(ep->td_base));
 
-               if (ep->conf_frame_Q) {
+               if (kfifo_initialized(&ep->conf_frame_Q)) {
                        size = cq_howmany(&ep->conf_frame_Q);
                        for (; size; size--) {
                                struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
                        cq_delete(&ep->conf_frame_Q);
                }
 
-               if (ep->empty_frame_Q) {
+               if (kfifo_initialized(&ep->empty_frame_Q)) {
                        size = cq_howmany(&ep->empty_frame_Q);
                        for (; size; size--) {
                                struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
                        cq_delete(&ep->empty_frame_Q);
                }
 
-               if (ep->dummy_packets_Q) {
+               if (kfifo_initialized(&ep->dummy_packets_Q)) {
                        size = cq_howmany(&ep->dummy_packets_Q);
                        for (; size; size--) {
                                u8 *buff = cq_get(&ep->dummy_packets_Q);
index 0ceec12..bee558a 100644 (file)
@@ -35,7 +35,9 @@
 #include <linux/usb.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/mm.h>
 #include <linux/irq.h>
+#include <asm/cacheflush.h>
 
 #include "../core/hcd.h"
 #include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
        enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
 }
 
+static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
+                             int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+       if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+               void *ptr;
+
+               for (ptr = urb->transfer_buffer;
+                    ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+                    ptr += PAGE_SIZE)
+                       flush_dcache_page(virt_to_page(ptr));
+       }
+
+       usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+       spin_unlock(&r8a66597->lock);
+       usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
+       spin_lock(&r8a66597->lock);
+}
+
 /* this function must be called with interrupt disabled */
 static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
 {
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
                list_del(&td->queue);
                kfree(td);
 
-               if (urb) {
-                       usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
-                                       urb);
+               if (urb)
+                       r8a66597_urb_done(r8a66597, urb, -ENODEV);
 
-                       spin_unlock(&r8a66597->lock);
-                       usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
-                                       -ENODEV);
-                       spin_lock(&r8a66597->lock);
-               }
                break;
        }
 }
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
 /* this function must be called with interrupt disabled */
 static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
                                        u16 syssts)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
 {
        if (syssts == SE0) {
                r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
                        usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
        }
 
+       spin_unlock(&r8a66597->lock);
        usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
+       spin_lock(&r8a66597->lock);
 }
 
 /* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
                if (usb_pipeisoc(urb->pipe))
                        urb->start_frame = r8a66597_get_frame(hcd);
 
-               usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
-               spin_unlock(&r8a66597->lock);
-               usb_hcd_giveback_urb(hcd, urb, status);
-               spin_lock(&r8a66597->lock);
+               r8a66597_urb_done(r8a66597, urb, status);
        }
 
        if (restart) {
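The r8a66597 hunks above gather URB completion into r8a66597_urb_done(), which drops the driver spinlock around usb_hcd_giveback_urb() because the completion callback may call back into the driver and take the lock again. A small pthread sketch of the drop-the-lock-around-the-callback pattern; all names are made up for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queued;

/* Completion callback: may re-enter and take the lock itself. */
static void complete(int id)
{
        pthread_mutex_lock(&lock);
        queued++;               /* e.g. resubmit another request */
        pthread_mutex_unlock(&lock);
        printf("completed %d\n", id);
}

/* Called with 'lock' held: drop it around the callback to avoid deadlock. */
static void give_back(int id)
{
        pthread_mutex_unlock(&lock);
        complete(id);
        pthread_mutex_lock(&lock);
}

int main(void)
{
        pthread_mutex_lock(&lock);
        give_back(1);
        give_back(2);
        pthread_mutex_unlock(&lock);
        printf("queued after callbacks: %d\n", queued);
        return 0;
}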
index 0025847..8b37a4b 100644 (file)
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
        { USB_DEVICE(0x0711, 0x0902) },
        { USB_DEVICE(0x0711, 0x0903) },
        { USB_DEVICE(0x0711, 0x0918) },
+       { USB_DEVICE(0x0711, 0x0920) },
        { USB_DEVICE(0x182d, 0x021c) },
        { USB_DEVICE(0x182d, 0x0269) },
        { }
index de56b3d..3d2d3e5 100644 (file)
@@ -44,6 +44,7 @@ config ISP1301_OMAP
 config USB_ULPI
        bool "Generic ULPI Transceiver Driver"
        depends on ARM
+       select USB_OTG_UTILS
        help
          Enable this to support ULPI connected USB OTG transceivers which
          are likely found on embedded boards.
index 216f187..7638828 100644 (file)
@@ -50,7 +50,7 @@
  * Version Information
  */
 #define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
 #define DRIVER_DESC "USB FTDI Serial Converters Driver"
 
 static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
 
 
 
+/*
+ * Device ID not listed? Test via module params product/vendor or
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
 static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
        /*
-        * Due to many user requests for multiple ELV devices we enable
-        * them by default.
+        * ELV devices:
         */
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
        { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
        { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
        { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
        { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
        { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
        { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
index da92b49..c8951ae 100644 (file)
@@ -38,6 +38,8 @@
 /* www.candapter.com Ewert Energy Systems CANdapter device */
 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
 
+#define FTDI_NXTCAM_PID                0xABB8 /* NXTCam for Mindstorms NXT */
+
 /* OOCDlink by Joern Kaipf <joernk@web.de>
  * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
 #define FTDI_OOCDLINK_PID      0xbaf8  /* Amontec JTAGkey */
 /*
  * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
  * All of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
  *
  * The previously included PID for the UO 100 module was incorrect.
  * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
  *
  * Armin Laeuger originally sent the PID for the UM 100 module.
  */
+#define FTDI_ELV_USR_PID       0xE000  /* ELV Universal-Sound-Recorder */
+#define FTDI_ELV_MSM1_PID      0xE001  /* ELV Mini-Sound-Modul */
+#define FTDI_ELV_KL100_PID     0xE002  /* ELV Kfz-Leistungsmesser KL 100 */
+#define FTDI_ELV_WS550_PID     0xE004  /* WS 550 */
+#define FTDI_ELV_EC3000_PID    0xE006  /* ENERGY CONTROL 3000 USB */
+#define FTDI_ELV_WS888_PID     0xE008  /* WS 888 */
+#define FTDI_ELV_TWS550_PID    0xE009  /* Technoline WS 550 */
+#define FTDI_ELV_FEM_PID       0xE00A  /* Funk Energie Monitor */
 #define FTDI_ELV_FHZ1300PC_PID 0xE0E8  /* FHZ 1300 PC */
 #define FTDI_ELV_WS500_PID     0xE0E9  /* PC-Wetterstation (WS 500) */
 #define FTDI_ELV_HS485_PID     0xE0EA  /* USB to RS-485 adapter */
+#define FTDI_ELV_UMS100_PID    0xE0EB  /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
+#define FTDI_ELV_TFD128_PID    0xE0EC  /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
+#define FTDI_ELV_FM3RX_PID     0xE0ED  /* ELV Messwertuebertragung FM3 RX */
+#define FTDI_ELV_WS777_PID     0xE0EE  /* Conrad WS 777 */
 #define FTDI_ELV_EM1010PC_PID  0xE0EF  /* Engery monitor EM 1010 PC */
 #define FTDI_ELV_CSI8_PID      0xE0F0  /* Computer-Schalt-Interface (CSI 8) */
 #define FTDI_ELV_EM1000DL_PID  0xE0F1  /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
 #define FTDI_ELV_PCK100_PID    0xE0F2  /* PC-Kabeltester (PCK 100) */
 #define FTDI_ELV_RFP500_PID    0xE0F3  /* HF-Leistungsmesser (RFP 500) */
 #define FTDI_ELV_FS20SIG_PID   0xE0F4  /* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_UTP8_PID      0xE0F5  /* ELV UTP 8 */
 #define FTDI_ELV_WS300PC_PID   0xE0F6  /* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_WS444PC_PID   0xE0F7  /* Conrad WS 444 PC */
 #define FTDI_PHI_FISCO_PID      0xE40B  /* PHI Fisco USB to Serial cable */
 #define FTDI_ELV_UAD8_PID      0xF068  /* USB-AD-Wandler (UAD 8) */
 #define FTDI_ELV_UDA7_PID      0xF069  /* USB-DA-Wandler (UDA 7) */
 #define PAPOUCH_VID                    0x5050  /* Vendor ID */
 #define PAPOUCH_TMU_PID                        0x0400  /* TMU USB Thermometer */
 #define PAPOUCH_QUIDO4x4_PID           0x0900  /* Quido 4/4 Module */
+#define PAPOUCH_AD4USB_PID             0x8003  /* AD4USB Measurement Module */
 
 /*
  * Marvell SheevaPlug
index ac1b644..3eb6143 100644 (file)
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
        { }
 };
index c932f90..49575fb 100644 (file)
@@ -941,7 +941,7 @@ UNUSUAL_DEV(  0x07ab, 0xfccd, 0x0000, 0x9999,
 UNUSUAL_DEV(  0x07af, 0x0004, 0x0100, 0x0133,
                "Microtech",
                "USB-SCSI-DB25",
-               US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+               US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
                US_FL_SCM_MULT_TARG ), 
 
 UNUSUAL_DEV(  0x07af, 0x0005, 0x0100, 0x0100,
index eb12182..d25df51 100644 (file)
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
        return 0;
 }
 
+static void efifb_destroy(struct fb_info *info)
+{
+       if (info->screen_base)
+               iounmap(info->screen_base);
+       release_mem_region(info->aperture_base, info->aperture_size);
+       framebuffer_release(info);
+}
+
 static struct fb_ops efifb_ops = {
        .owner          = THIS_MODULE,
+       .fb_destroy     = efifb_destroy,
        .fb_setcolreg   = efifb_setcolreg,
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
        info->par = NULL;
 
        info->aperture_base = efifb_fix.smem_start;
-       info->aperture_size = size_total;
+       info->aperture_size = size_remap;
 
        info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
index c7b3f9d..2159e66 100644 (file)
@@ -1,9 +1,8 @@
 /*
  * Blackfin On-Chip Watchdog Driver
- *  Supports BF53[123]/BF53[467]/BF54[2489]/BF561
  *
  * Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
  * Copyright 2006-2007 Michele d'Amico
  * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
  */
 static int bfin_wdt_set_timeout(unsigned long t)
 {
-       u32 cnt;
+       u32 cnt, max_t, sclk;
        unsigned long flags;
 
-       stampit();
+       sclk = get_sclk();
+       max_t = -1 / sclk;
+       cnt = t * sclk;
+       stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
 
-       cnt = t * get_sclk();
-       if (cnt < get_sclk()) {
+       if (t > max_t) {
                printk(KERN_WARNING PFX "timeout value is too large\n");
                return -EINVAL;
        }
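The Blackfin watchdog hunk above derives the largest representable timeout as -1 / sclk (the all-ones counter value divided by the clock rate) and rejects anything larger, rather than detecting overflow only after the multiplication has already wrapped. A standalone sketch of the same range check with an invented clock rate.

#include <stdint.h>
#include <stdio.h>

#define SCLK 125000000u /* illustrative system clock, 125 MHz */

/* Convert a timeout in seconds to a counter value, rejecting overflow. */
static int set_timeout(uint32_t seconds, uint32_t *cnt)
{
        uint32_t max_t = UINT32_MAX / SCLK;  /* same idea as -1 / sclk */

        if (seconds > max_t)
                return -1;                   /* would not fit in the counter */
        *cnt = seconds * SCLK;
        return 0;
}

int main(void)
{
        uint32_t cnt;

        printf("10s  -> %s\n", set_timeout(10, &cnt) ? "rejected" : "ok");
        printf("100s -> %s\n", set_timeout(100, &cnt) ? "rejected" : "ok");
        return 0;
}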
index cf62b05..7d6c213 100644 (file)
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
 
 static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 {
-       char *options;
+       char *options, *tmp_options;
        substring_t args[MAX_OPT_ARGS];
        char *p;
        int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
        if (!opts)
                return 0;
 
-       options = kstrdup(opts, GFP_KERNEL);
-       if (!options)
+       tmp_options = kstrdup(opts, GFP_KERNEL);
+       if (!tmp_options) {
+               ret = -ENOMEM;
                goto fail_option_alloc;
+       }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        break;
                case Opt_cache:
                        s = match_strdup(&args[0]);
-                       if (!s)
-                               goto fail_option_alloc;
+                       if (!s) {
+                               ret = -ENOMEM;
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                 "problem allocating copy of cache arg\n");
+                               goto free_and_return;
+                       }
 
                        if (strcmp(s, "loose") == 0)
                                v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 
                case Opt_access:
                        s = match_strdup(&args[0]);
-                       if (!s)
-                               goto fail_option_alloc;
+                       if (!s) {
+                               ret = -ENOMEM;
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                 "problem allocating copy of access arg\n");
+                               goto free_and_return;
+                       }
 
                        v9ses->flags &= ~V9FS_ACCESS_MASK;
                        if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        continue;
                }
        }
-       kfree(options);
-       return ret;
 
+free_and_return:
+       kfree(tmp_options);
 fail_option_alloc:
-       P9_DPRINTK(P9_DEBUG_ERROR,
-                  "failed to allocate copy of option argument\n");
-       return -ENOMEM;
+       return ret;
 }
 
 /**
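The v9fs hunk above keeps a second pointer (tmp_options) to the kstrdup()'d option string because strsep() advances the pointer it is given, and only the original pointer may be passed to kfree(). A minimal userspace sketch of the same pattern; the option string contents are invented.

#define _DEFAULT_SOURCE  /* for strsep() and strdup() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *opts = "cache=loose,access=user,debug";
        char *tmp_options = strdup(opts);  /* keep this pointer for free() */
        char *options = tmp_options;       /* strsep() will advance this one */
        char *p;

        if (!tmp_options)
                return 1;

        while ((p = strsep(&options, ",")) != NULL)
                printf("token: %s\n", p);

        free(tmp_options); /* freeing 'options' would be wrong: it is NULL now */
        return 0;
}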
index 3a7560e..ed83583 100644 (file)
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
index 3902bf4..74a0461 100644 (file)
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
        return total;
 }
 
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+                                       int datasync)
+{
+       struct p9_fid *fid;
+       struct p9_wstat wstat;
+       int retval;
+
+       P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+                                               dentry, datasync);
+
+       fid = filp->private_data;
+       v9fs_blank_wstat(&wstat);
+
+       retval = p9_client_wstat(fid, &wstat);
+       return retval;
+}
+
 static const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
+       .fsync = v9fs_file_fsync,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
+       .fsync = v9fs_file_fsync,
 };
index 9d03d1e..a407fa3 100644 (file)
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
  *
  */
 
-static void
+void
 v9fs_blank_wstat(struct p9_wstat *wstat)
 {
        wstat->type = ~0;
index 0118d67..3d283ab 100644 (file)
@@ -60,11 +60,6 @@ do {                                                 \
                current->pid, __func__, ##args);        \
 } while (0)
 
-struct rehash_entry {
-       struct task_struct *task;
-       struct list_head list;
-};
-
 /* Unified info structure.  This is pointed to by both the dentry and
    inode structures.  Each file in the filesystem has an instance of this
    structure.  It holds a reference to the dentry, so dentries are never
@@ -81,7 +76,6 @@ struct autofs_info {
 
        struct list_head active;
        int active_count;
-       struct list_head rehash_list;
 
        struct list_head expiring;
 
@@ -104,7 +98,6 @@ struct autofs_info {
 #define AUTOFS_INF_EXPIRING    (1<<0) /* dentry is in the process of expiring */
 #define AUTOFS_INF_MOUNTPOINT  (1<<1) /* mountpoint status for direct expire */
 #define AUTOFS_INF_PENDING     (1<<2) /* dentry pending mount */
-#define AUTOFS_INF_REHASH      (1<<3) /* dentry in transit to ->lookup() */
 
 struct autofs_wait_queue {
        wait_queue_head_t queue;
index 00bf8fc..c8a80df 100644 (file)
@@ -544,10 +544,9 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
                        goto out;
                devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
                err = 0;
-               if (path.dentry->d_inode &&
-                   path.mnt->mnt_root == path.dentry) {
+               if (path.mnt->mnt_root == path.dentry) {
                        err = 1;
-                       magic = path.dentry->d_inode->i_sb->s_magic;
+                       magic = path.mnt->mnt_sb->s_magic;
                }
        } else {
                dev_t dev = sbi->sb->s_dev;
@@ -560,10 +559,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
 
                err = have_submounts(path.dentry);
 
-               if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) {
-                       if (follow_down(&path))
-                               magic = path.mnt->mnt_sb->s_magic;
-               }
+               if (follow_down(&path))
+                       magic = path.mnt->mnt_sb->s_magic;
        }
 
        param->ismountpoint.out.devid = devid;
index 74bc9aa..a796c94 100644 (file)
@@ -279,7 +279,6 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
                        root->d_mounted--;
                }
                ino->flags |= AUTOFS_INF_EXPIRING;
-               autofs4_add_expiring(root);
                init_completion(&ino->expire_complete);
                spin_unlock(&sbi->fs_lock);
                return root;
@@ -407,7 +406,6 @@ found:
                expired, (int)expired->d_name.len, expired->d_name.name);
        ino = autofs4_dentry_ino(expired);
        ino->flags |= AUTOFS_INF_EXPIRING;
-       autofs4_add_expiring(expired);
        init_completion(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
        spin_lock(&dcache_lock);
@@ -435,7 +433,7 @@ int autofs4_expire_wait(struct dentry *dentry)
 
                DPRINTK("expire done status=%d", status);
 
-               if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
+               if (d_unhashed(dentry))
                        return -EAGAIN;
 
                return status;
@@ -475,7 +473,6 @@ int autofs4_expire_run(struct super_block *sb,
        spin_lock(&sbi->fs_lock);
        ino = autofs4_dentry_ino(dentry);
        ino->flags &= ~AUTOFS_INF_EXPIRING;
-       autofs4_del_expiring(dentry);
        complete_all(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
 
@@ -506,7 +503,6 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
                        ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
                }
                ino->flags &= ~AUTOFS_INF_EXPIRING;
-               autofs4_del_expiring(dentry);
                complete_all(&ino->expire_complete);
                spin_unlock(&sbi->fs_lock);
                dput(dentry);
index d0a3de2..821b2b9 100644 (file)
@@ -49,7 +49,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
                ino->dentry = NULL;
                ino->size = 0;
                INIT_LIST_HEAD(&ino->active);
-               INIT_LIST_HEAD(&ino->rehash_list);
                ino->active_count = 0;
                INIT_LIST_HEAD(&ino->expiring);
                atomic_set(&ino->count, 0);
@@ -97,63 +96,6 @@ void autofs4_free_ino(struct autofs_info *ino)
        kfree(ino);
 }
 
-/*
- * Deal with the infamous "Busy inodes after umount ..." message.
- *
- * Clean up the dentry tree. This happens with autofs if the user
- * space program goes away due to a SIGKILL, SIGSEGV etc.
- */
-static void autofs4_force_release(struct autofs_sb_info *sbi)
-{
-       struct dentry *this_parent = sbi->sb->s_root;
-       struct list_head *next;
-
-       if (!sbi->sb->s_root)
-               return;
-
-       spin_lock(&dcache_lock);
-repeat:
-       next = this_parent->d_subdirs.next;
-resume:
-       while (next != &this_parent->d_subdirs) {
-               struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
-
-               /* Negative dentry - don`t care */
-               if (!simple_positive(dentry)) {
-                       next = next->next;
-                       continue;
-               }
-
-               if (!list_empty(&dentry->d_subdirs)) {
-                       this_parent = dentry;
-                       goto repeat;
-               }
-
-               next = next->next;
-               spin_unlock(&dcache_lock);
-
-               DPRINTK("dentry %p %.*s",
-                       dentry, (int)dentry->d_name.len, dentry->d_name.name);
-
-               dput(dentry);
-               spin_lock(&dcache_lock);
-       }
-
-       if (this_parent != sbi->sb->s_root) {
-               struct dentry *dentry = this_parent;
-
-               next = this_parent->d_u.d_child.next;
-               this_parent = this_parent->d_parent;
-               spin_unlock(&dcache_lock);
-               DPRINTK("parent dentry %p %.*s",
-                       dentry, (int)dentry->d_name.len, dentry->d_name.name);
-               dput(dentry);
-               spin_lock(&dcache_lock);
-               goto resume;
-       }
-       spin_unlock(&dcache_lock);
-}
-
 void autofs4_kill_sb(struct super_block *sb)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(sb);
@@ -170,15 +112,12 @@ void autofs4_kill_sb(struct super_block *sb)
        /* Free wait queues, close pipe */
        autofs4_catatonic_mode(sbi);
 
-       /* Clean up and release dangling references */
-       autofs4_force_release(sbi);
-
        sb->s_fs_info = NULL;
        kfree(sbi);
 
 out_kill_sb:
        DPRINTK("shutting down");
-       kill_anon_super(sb);
+       kill_litter_super(sb);
 }
 
 static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
index 30cc9dd..a015b49 100644 (file)
@@ -104,99 +104,6 @@ static void autofs4_del_active(struct dentry *dentry)
        return;
 }
 
-static void autofs4_add_rehash_entry(struct autofs_info *ino,
-                                    struct rehash_entry *entry)
-{
-       entry->task = current;
-       INIT_LIST_HEAD(&entry->list);
-       list_add(&entry->list, &ino->rehash_list);
-       return;
-}
-
-static void autofs4_remove_rehash_entry(struct autofs_info *ino)
-{
-       struct list_head *head = &ino->rehash_list;
-       struct rehash_entry *entry;
-       list_for_each_entry(entry, head, list) {
-               if (entry->task == current) {
-                       list_del(&entry->list);
-                       kfree(entry);
-                       break;
-               }
-       }
-       return;
-}
-
-static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
-{
-       struct autofs_sb_info *sbi = ino->sbi;
-       struct rehash_entry *entry, *next;
-       struct list_head *head;
-
-       spin_lock(&sbi->fs_lock);
-       spin_lock(&sbi->lookup_lock);
-       if (!(ino->flags & AUTOFS_INF_REHASH)) {
-               spin_unlock(&sbi->lookup_lock);
-               spin_unlock(&sbi->fs_lock);
-               return;
-       }
-       ino->flags &= ~AUTOFS_INF_REHASH;
-       head = &ino->rehash_list;
-       list_for_each_entry_safe(entry, next, head, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-       spin_unlock(&sbi->lookup_lock);
-       spin_unlock(&sbi->fs_lock);
-       dput(ino->dentry);
-
-       return;
-}
-
-static void autofs4_revalidate_drop(struct dentry *dentry,
-                                   struct rehash_entry *entry)
-{
-       struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-       struct autofs_info *ino = autofs4_dentry_ino(dentry);
-       /*
-        * Add to the active list so we can pick this up in
-        * ->lookup(). Also add an entry to a rehash list so
-        * we know when there are no dentrys in flight so we
-        * know when we can rehash the dentry.
-        */
-       spin_lock(&sbi->lookup_lock);
-       if (list_empty(&ino->active))
-               list_add(&ino->active, &sbi->active_list);
-       autofs4_add_rehash_entry(ino, entry);
-       spin_unlock(&sbi->lookup_lock);
-       if (!(ino->flags & AUTOFS_INF_REHASH)) {
-               ino->flags |= AUTOFS_INF_REHASH;
-               dget(dentry);
-               spin_lock(&dentry->d_lock);
-               __d_drop(dentry);
-               spin_unlock(&dentry->d_lock);
-       }
-       return;
-}
-
-static void autofs4_revalidate_rehash(struct dentry *dentry)
-{
-       struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-       struct autofs_info *ino = autofs4_dentry_ino(dentry);
-       if (ino->flags & AUTOFS_INF_REHASH) {
-               spin_lock(&sbi->lookup_lock);
-               autofs4_remove_rehash_entry(ino);
-               if (list_empty(&ino->rehash_list)) {
-                       spin_unlock(&sbi->lookup_lock);
-                       ino->flags &= ~AUTOFS_INF_REHASH;
-                       d_rehash(dentry);
-                       dput(ino->dentry);
-               } else
-                       spin_unlock(&sbi->lookup_lock);
-       }
-       return;
-}
-
 static unsigned int autofs4_need_mount(unsigned int flags)
 {
        unsigned int res = 0;
@@ -236,7 +143,7 @@ out:
        return dcache_dir_open(inode, file);
 }
 
-static int try_to_fill_dentry(struct dentry *dentry)
+static int try_to_fill_dentry(struct dentry *dentry, int flags)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -249,17 +156,55 @@ static int try_to_fill_dentry(struct dentry *dentry)
         * Wait for a pending mount, triggering one if there
         * isn't one already
         */
-       DPRINTK("waiting for mount name=%.*s",
-                dentry->d_name.len, dentry->d_name.name);
+       if (dentry->d_inode == NULL) {
+               DPRINTK("waiting for mount name=%.*s",
+                        dentry->d_name.len, dentry->d_name.name);
 
-       status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+               status = autofs4_wait(sbi, dentry, NFY_MOUNT);
 
-       DPRINTK("mount done status=%d", status);
+               DPRINTK("mount done status=%d", status);
 
-       /* Update expiry counter */
-       ino->last_used = jiffies;
+               /* Turn this into a real negative dentry? */
+               if (status == -ENOENT) {
+                       spin_lock(&sbi->fs_lock);
+                       ino->flags &= ~AUTOFS_INF_PENDING;
+                       spin_unlock(&sbi->fs_lock);
+                       return status;
+               } else if (status) {
+                       /* Return a negative dentry, but leave it "pending" */
+                       return status;
+               }
+       /* Trigger mount for path component or follow link */
+       } else if (ino->flags & AUTOFS_INF_PENDING ||
+                       autofs4_need_mount(flags) ||
+                       current->link_count) {
+               DPRINTK("waiting for mount name=%.*s",
+                       dentry->d_name.len, dentry->d_name.name);
 
-       return status;
+               spin_lock(&sbi->fs_lock);
+               ino->flags |= AUTOFS_INF_PENDING;
+               spin_unlock(&sbi->fs_lock);
+               status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+
+               DPRINTK("mount done status=%d", status);
+
+               if (status) {
+                       spin_lock(&sbi->fs_lock);
+                       ino->flags &= ~AUTOFS_INF_PENDING;
+                       spin_unlock(&sbi->fs_lock);
+                       return status;
+               }
+       }
+
+       /* Initialize expiry counter after successful mount */
+       if (ino)
+               ino->last_used = jiffies;
+
+       spin_lock(&sbi->fs_lock);
+       ino->flags &= ~AUTOFS_INF_PENDING;
+       spin_unlock(&sbi->fs_lock);
+
+       return 0;
 }
 
 /* For autofs direct mounts the follow link triggers the mount */
@@ -313,16 +258,10 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
         */
        if (ino->flags & AUTOFS_INF_PENDING ||
            (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
-               ino->flags |= AUTOFS_INF_PENDING;
                spin_unlock(&dcache_lock);
                spin_unlock(&sbi->fs_lock);
 
-               status = try_to_fill_dentry(dentry);
-
-               spin_lock(&sbi->fs_lock);
-               ino->flags &= ~AUTOFS_INF_PENDING;
-               spin_unlock(&sbi->fs_lock);
-
+               status = try_to_fill_dentry(dentry, 0);
                if (status)
                        goto out_error;
 
@@ -361,47 +300,18 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct inode *dir = dentry->d_parent->d_inode;
        struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
-       struct autofs_info *ino = autofs4_dentry_ino(dentry);
-       struct rehash_entry *entry;
+       int oz_mode = autofs4_oz_mode(sbi);
        int flags = nd ? nd->flags : 0;
-       unsigned int mutex_aquired;
+       int status = 1;
 
-       DPRINTK("name = %.*s oz_mode = %d",
-               dentry->d_name.len, dentry->d_name.name, oz_mode);
-
-       /* Daemon never causes a mount to trigger */
-       if (autofs4_oz_mode(sbi))
-               return 1;
-
-       entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
-
-       mutex_aquired = mutex_trylock(&dir->i_mutex);
-
-       spin_lock(&sbi->fs_lock);
-       spin_lock(&dcache_lock);
        /* Pending dentry */
+       spin_lock(&sbi->fs_lock);
        if (autofs4_ispending(dentry)) {
-               int status;
-
-               /*
-                * We can only unhash and send this to ->lookup() if
-                * the directory mutex is held over d_revalidate() and
-                * ->lookup(). This prevents the VFS from incorrectly
-                * seeing the dentry as non-existent.
-                */
-               ino->flags |= AUTOFS_INF_PENDING;
-               if (!mutex_aquired) {
-                       autofs4_revalidate_drop(dentry, entry);
-                       spin_unlock(&dcache_lock);
-                       spin_unlock(&sbi->fs_lock);
-                       return 0;
-               }
-               spin_unlock(&dcache_lock);
+               /* The daemon never causes a mount to trigger */
                spin_unlock(&sbi->fs_lock);
-               mutex_unlock(&dir->i_mutex);
-               kfree(entry);
+
+               if (oz_mode)
+                       return 1;
 
                /*
                 * If the directory has gone away due to an expire
@@ -415,82 +325,45 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
                 * A zero status is success otherwise we have a
                 * negative error code.
                 */
-               status = try_to_fill_dentry(dentry);
-
-               spin_lock(&sbi->fs_lock);
-               ino->flags &= ~AUTOFS_INF_PENDING;
-               spin_unlock(&sbi->fs_lock);
-
+               status = try_to_fill_dentry(dentry, flags);
                if (status == 0)
                        return 1;
 
                return status;
        }
+       spin_unlock(&sbi->fs_lock);
+
+       /* Negative dentry.. invalidate if "old" */
+       if (dentry->d_inode == NULL)
+               return 0;
 
        /* Check for a non-mountpoint directory with no contents */
+       spin_lock(&dcache_lock);
        if (S_ISDIR(dentry->d_inode->i_mode) &&
            !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
                DPRINTK("dentry=%p %.*s, emptydir",
                         dentry, dentry->d_name.len, dentry->d_name.name);
+               spin_unlock(&dcache_lock);
 
-               if (autofs4_need_mount(flags) || current->link_count) {
-                       int status;
-
-                       /*
-                        * We can only unhash and send this to ->lookup() if
-                        * the directory mutex is held over d_revalidate() and
-                        * ->lookup(). This prevents the VFS from incorrectly
-                        * seeing the dentry as non-existent.
-                        */
-                       ino->flags |= AUTOFS_INF_PENDING;
-                       if (!mutex_aquired) {
-                               autofs4_revalidate_drop(dentry, entry);
-                               spin_unlock(&dcache_lock);
-                               spin_unlock(&sbi->fs_lock);
-                               return 0;
-                       }
-                       spin_unlock(&dcache_lock);
-                       spin_unlock(&sbi->fs_lock);
-                       mutex_unlock(&dir->i_mutex);
-                       kfree(entry);
-
-                       /*
-                        * A zero status is success otherwise we have a
-                        * negative error code.
-                        */
-                       status = try_to_fill_dentry(dentry);
-
-                       spin_lock(&sbi->fs_lock);
-                       ino->flags &= ~AUTOFS_INF_PENDING;
-                       spin_unlock(&sbi->fs_lock);
+               /* The daemon never causes a mount to trigger */
+               if (oz_mode)
+                       return 1;
 
-                       if (status == 0)
-                               return 1;
+               /*
+                * A zero status is success otherwise we have a
+                * negative error code.
+                */
+               status = try_to_fill_dentry(dentry, flags);
+               if (status == 0)
+                       return 1;
 
-                       return status;
-               }
+               return status;
        }
        spin_unlock(&dcache_lock);
-       spin_unlock(&sbi->fs_lock);
-
-       if (mutex_aquired)
-               mutex_unlock(&dir->i_mutex);
-
-       kfree(entry);
 
        return 1;
 }
 
-static void autofs4_free_rehash_entrys(struct autofs_info *inf)
-{
-       struct list_head *head = &inf->rehash_list;
-       struct rehash_entry *entry, *next;
-       list_for_each_entry_safe(entry, next, head, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-}
-
 void autofs4_dentry_release(struct dentry *de)
 {
        struct autofs_info *inf;
@@ -509,8 +382,6 @@ void autofs4_dentry_release(struct dentry *de)
                                list_del(&inf->active);
                        if (!list_empty(&inf->expiring))
                                list_del(&inf->expiring);
-                       if (!list_empty(&inf->rehash_list))
-                               autofs4_free_rehash_entrys(inf);
                        spin_unlock(&sbi->lookup_lock);
                }
 
@@ -543,7 +414,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
        const unsigned char *str = name->name;
        struct list_head *p, *head;
 
-restart:
        spin_lock(&dcache_lock);
        spin_lock(&sbi->lookup_lock);
        head = &sbi->active_list;
@@ -561,19 +431,6 @@ restart:
                if (atomic_read(&active->d_count) == 0)
                        goto next;
 
-               if (active->d_inode && IS_DEADDIR(active->d_inode)) {
-                       if (!list_empty(&ino->rehash_list)) {
-                               dget(active);
-                               spin_unlock(&active->d_lock);
-                               spin_unlock(&sbi->lookup_lock);
-                               spin_unlock(&dcache_lock);
-                               autofs4_remove_rehash_entrys(ino);
-                               dput(active);
-                               goto restart;
-                       }
-                       goto next;
-               }
-
                qstr = &active->d_name;
 
                if (active->d_name.hash != hash)
@@ -586,11 +443,13 @@ restart:
                if (memcmp(qstr->name, str, len))
                        goto next;
 
-               dget(active);
-               spin_unlock(&active->d_lock);
-               spin_unlock(&sbi->lookup_lock);
-               spin_unlock(&dcache_lock);
-               return active;
+               if (d_unhashed(active)) {
+                       dget(active);
+                       spin_unlock(&active->d_lock);
+                       spin_unlock(&sbi->lookup_lock);
+                       spin_unlock(&dcache_lock);
+                       return active;
+               }
 next:
                spin_unlock(&active->d_lock);
        }
@@ -639,11 +498,13 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
                if (memcmp(qstr->name, str, len))
                        goto next;
 
-               dget(expiring);
-               spin_unlock(&expiring->d_lock);
-               spin_unlock(&sbi->lookup_lock);
-               spin_unlock(&dcache_lock);
-               return expiring;
+               if (d_unhashed(expiring)) {
+                       dget(expiring);
+                       spin_unlock(&expiring->d_lock);
+                       spin_unlock(&sbi->lookup_lock);
+                       spin_unlock(&dcache_lock);
+                       return expiring;
+               }
 next:
                spin_unlock(&expiring->d_lock);
        }
@@ -653,48 +514,6 @@ next:
        return NULL;
 }
 
-static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
-                                          struct dentry *dentry, int oz_mode)
-{
-       struct autofs_info *ino;
-
-       /*
-        * Mark the dentry incomplete but don't hash it. We do this
-        * to serialize our inode creation operations (symlink and
-        * mkdir) which prevents deadlock during the callback to
-        * the daemon. Subsequent user space lookups for the same
-        * dentry are placed on the wait queue while the daemon
-        * itself is allowed passage unresticted so the create
-        * operation itself can then hash the dentry. Finally,
-        * we check for the hashed dentry and return the newly
-        * hashed dentry.
-        */
-       dentry->d_op = &autofs4_root_dentry_operations;
-
-       /*
-        * And we need to ensure that the same dentry is used for
-        * all following lookup calls until it is hashed so that
-        * the dentry flags are persistent throughout the request.
-        */
-       ino = autofs4_init_ino(NULL, sbi, 0555);
-       if (!ino)
-               return ERR_PTR(-ENOMEM);
-
-       dentry->d_fsdata = ino;
-       ino->dentry = dentry;
-
-       /*
-        * Only set the mount pending flag for new dentrys not created
-        * by the daemon.
-        */
-       if (!oz_mode)
-               ino->flags |= AUTOFS_INF_PENDING;
-
-       d_instantiate(dentry, NULL);
-
-       return ino;
-}
-
 /* Lookups in the root directory */
 static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
@@ -702,7 +521,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
        struct autofs_info *ino;
        struct dentry *expiring, *active;
        int oz_mode;
-       int status = 0;
 
        DPRINTK("name = %.*s",
                dentry->d_name.len, dentry->d_name.name);
@@ -717,26 +535,44 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
        DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
                 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
 
-       spin_lock(&sbi->fs_lock);
        active = autofs4_lookup_active(dentry);
        if (active) {
                dentry = active;
                ino = autofs4_dentry_ino(dentry);
-               /* If this came from revalidate, rehash it */
-               autofs4_revalidate_rehash(dentry);
-               spin_unlock(&sbi->fs_lock);
        } else {
-               spin_unlock(&sbi->fs_lock);
-               ino = init_new_dentry(sbi, dentry, oz_mode);
-               if (IS_ERR(ino))
-                       return (struct dentry *) ino;
-       }
+               /*
+                * Mark the dentry incomplete but don't hash it. We do this
+                * to serialize our inode creation operations (symlink and
+                * mkdir) which prevents deadlock during the callback to
+                * the daemon. Subsequent user space lookups for the same
+                * dentry are placed on the wait queue while the daemon
+                * itself is allowed passage unrestricted so the create
+                * operation itself can then hash the dentry. Finally,
+                * we check for the hashed dentry and return the newly
+                * hashed dentry.
+                */
+               dentry->d_op = &autofs4_root_dentry_operations;
+
+               /*
+                * And we need to ensure that the same dentry is used for
+                * all following lookup calls until it is hashed so that
+                * the dentry flags are persistent throughout the request.
+                */
+               ino = autofs4_init_ino(NULL, sbi, 0555);
+               if (!ino)
+                       return ERR_PTR(-ENOMEM);
 
-       autofs4_add_active(dentry);
+               dentry->d_fsdata = ino;
+               ino->dentry = dentry;
+
+               autofs4_add_active(dentry);
+
+               d_instantiate(dentry, NULL);
+       }
 
        if (!oz_mode) {
-               expiring = autofs4_lookup_expiring(dentry);
                mutex_unlock(&dir->i_mutex);
+               expiring = autofs4_lookup_expiring(dentry);
                if (expiring) {
                        /*
                         * If we are racing with expire the request might not
@@ -744,22 +580,23 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
                         * so it must have been successful, so just wait for it.
                         */
                        autofs4_expire_wait(expiring);
+                       autofs4_del_expiring(expiring);
                        dput(expiring);
                }
-               status = try_to_fill_dentry(dentry);
-               mutex_lock(&dir->i_mutex);
+
                spin_lock(&sbi->fs_lock);
-               ino->flags &= ~AUTOFS_INF_PENDING;
+               ino->flags |= AUTOFS_INF_PENDING;
                spin_unlock(&sbi->fs_lock);
+               if (dentry->d_op && dentry->d_op->d_revalidate)
+                       (dentry->d_op->d_revalidate)(dentry, nd);
+               mutex_lock(&dir->i_mutex);
        }
 
-       autofs4_del_active(dentry);
-
        /*
-        * If we had a mount fail, check if we had to handle
+        * If we are still pending, check if we had to handle
         * a signal. If so we can force a restart..
         */
-       if (status) {
+       if (ino->flags & AUTOFS_INF_PENDING) {
                /* See if we were interrupted */
                if (signal_pending(current)) {
                        sigset_t *sigset = &current->pending.signal;
@@ -771,46 +608,43 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
                            return ERR_PTR(-ERESTARTNOINTR);
                        }
                }
-       }
-
-       /*
-        * User space can (and has done in the past) remove and re-create
-        * this directory during the callback. This can leave us with an
-        * unhashed dentry, but a successful mount!  So we need to
-        * perform another cached lookup in case the dentry now exists.
-        */
-       if (!oz_mode && !have_submounts(dentry)) {
-               struct dentry *new;
-               new = d_lookup(dentry->d_parent, &dentry->d_name);
-               if (new) {
-                       if (active)
-                               dput(active);
-                       return new;
-               } else {
-                       if (!status)
-                               status = -ENOENT;
+               if (!oz_mode) {
+                       spin_lock(&sbi->fs_lock);
+                       ino->flags &= ~AUTOFS_INF_PENDING;
+                       spin_unlock(&sbi->fs_lock);
                }
        }
 
        /*
-        * If we had a mount failure, return status to user space.
-        * If the mount succeeded and we used a dentry from the active queue
-        * return it.
+        * If this dentry is unhashed, then we shouldn't honour this
+        * lookup.  Returning ENOENT here doesn't do the right thing
+        * for all system calls, but it should be OK for the operations
+        * we permit from an autofs.
         */
-       if (status) {
-               dentry = ERR_PTR(status);
-               if (active)
-                       dput(active);
-               return dentry;
-       } else {
+       if (!oz_mode && d_unhashed(dentry)) {
                /*
-                * Valid successful mount, return active dentry or NULL
-                * for a new dentry.
+                * A user space application can (and has done in the past)
+                * remove and re-create this directory during the callback.
+                * This can leave us with an unhashed dentry, but a
+                * successful mount!  So we need to perform another
+                * cached lookup in case the dentry now exists.
                 */
+               struct dentry *parent = dentry->d_parent;
+               struct dentry *new = d_lookup(parent, &dentry->d_name);
+               if (new != NULL)
+                       dentry = new;
+               else
+                       dentry = ERR_PTR(-ENOENT);
+
                if (active)
-                       return active;
+                       dput(active);
+
+               return dentry;
        }
 
+       if (active)
+               return active;
+
        return NULL;
 }
 
@@ -834,6 +668,8 @@ static int autofs4_dir_symlink(struct inode *dir,
        if (!ino)
                return -ENOMEM;
 
+       autofs4_del_active(dentry);
+
        ino->size = strlen(symname);
        cp = kmalloc(ino->size + 1, GFP_KERNEL);
        if (!cp) {
@@ -910,6 +746,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
        dir->i_mtime = CURRENT_TIME;
 
        spin_lock(&dcache_lock);
+       autofs4_add_expiring(dentry);
        spin_lock(&dentry->d_lock);
        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
@@ -935,6 +772,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
                spin_unlock(&dcache_lock);
                return -ENOTEMPTY;
        }
+       autofs4_add_expiring(dentry);
        spin_lock(&dentry->d_lock);
        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
@@ -972,6 +810,8 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (!ino)
                return -ENOMEM;
 
+       autofs4_del_active(dentry);
+
        inode = autofs4_get_inode(dir->i_sb, ino);
        if (!inode) {
                if (!dentry->d_fsdata)
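
The lookup path above now sets AUTOFS_INF_PENDING, triggers the mount by calling ->d_revalidate through the dentry's d_op table, and afterwards checks whether the flag is still set. A toy, self-contained sketch of that flag-plus-callback pattern; toy_dentry, toy_dops and toy_revalidate are made-up stand-ins, not the kernel structures:

#include <stdio.h>

#define INF_PENDING 0x1

struct toy_dentry;

struct toy_dops {
        int (*d_revalidate)(struct toy_dentry *);
};

struct toy_dentry {
        unsigned int flags;
        const struct toy_dops *d_op;
};

static int toy_revalidate(struct toy_dentry *d)
{
        /* pretend the mount completed and clear the pending flag */
        d->flags &= ~INF_PENDING;
        return 1;
}

static const struct toy_dops ops = { .d_revalidate = toy_revalidate };

int main(void)
{
        struct toy_dentry d = { .flags = INF_PENDING, .d_op = &ops };

        /* mirrors: if (dentry->d_op && dentry->d_op->d_revalidate)
         *                  (dentry->d_op->d_revalidate)(dentry, nd); */
        if (d.d_op && d.d_op->d_revalidate)
                d.d_op->d_revalidate(&d);

        printf("still pending: %d\n", !!(d.flags & INF_PENDING));
        return 0;
}
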
index 9d08096..6ed434a 100644 (file)
@@ -720,13 +720,15 @@ again:
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
-       fi = btrfs_item_ptr(leaf, path->slots[0],
-                          struct btrfs_file_extent_item);
        if (del_nr == 0) {
+               fi = btrfs_item_ptr(leaf, path->slots[0],
+                          struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
        } else {
+               fi = btrfs_item_ptr(leaf, del_slot - 1,
+                          struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
index 14ac480..eeb4986 100644 (file)
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
        dir = dget_parent(object->dentry);
 
        mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
-       ret = cachefiles_bury_object(cache, dir, object->dentry);
+
+       /* we need to check that our parent is _still_ our parent - it may have
+        * been renamed */
+       if (dir == object->dentry->d_parent) {
+               ret = cachefiles_bury_object(cache, dir, object->dentry);
+       } else {
+               /* it got moved, presumably by cachefilesd culling it, so it's
+                * no longer in the key path and we can ignore it */
+               mutex_unlock(&dir->d_inode->i_mutex);
+               ret = 0;
+       }
 
        dput(dir);
        _leave(" = %d", ret);
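
cachefiles_delete_object() now re-checks, after taking the parent's i_mutex, that dir is still the object's parent, since cachefilesd may have culled (renamed) the object in the meantime. A minimal userspace sketch of the same lock-then-revalidate pattern, using a made-up node type and a pthread mutex rather than dentries and i_mutex:

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *parent;
        pthread_mutex_t lock;
};

/* Remove 'child' only if 'parent' is still its parent once the lock is held;
 * otherwise someone moved it first and we can skip the work entirely. */
static int delete_if_still_child(struct node *parent, struct node *child)
{
        int ret = 0;

        pthread_mutex_lock(&parent->lock);
        if (child->parent == parent) {
                /* ... perform the removal under the lock ... */
                ret = 1;
        }
        pthread_mutex_unlock(&parent->lock);
        return ret;
}

int main(void)
{
        struct node parent = { .parent = NULL, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct node child = { .parent = &parent, .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("deleted: %d\n", delete_if_still_child(&parent, &child));
        return 0;
}
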
index 7b2600b..49503d2 100644 (file)
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
 Version 1.61
 ------------
 Fix append problem to Samba servers (files opened with O_APPEND could
index ac2b24c..78c1b86 100644 (file)
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */
 
-#define CIFS_VERSION   "1.61"
+#define CIFS_VERSION   "1.62"
 #endif                         /* _CIFSFS_H */
index 4b35f7e..ed751bb 100644 (file)
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
        bool svlocal:1;                 /* local server or remote */
        bool noblocksnd;                /* use blocking sendmsg */
        bool noautotune;                /* do not autotune send buf sizes */
+       bool tcp_nodelay;
        atomic_t inFlight;  /* number of requests on the wire to server */
 #ifdef CONFIG_CIFS_STATS2
        atomic_t inSend; /* requests trying to send */
index 3bbcaa7..2e9e09c 100644 (file)
@@ -98,7 +98,7 @@ struct smb_vol {
        bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
        unsigned int rsize;
        unsigned int wsize;
-       unsigned int sockopt;
+       bool sockopt_tcp_nodelay:1;
        unsigned short int port;
        char *prepath;
 };
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
                                        simple_strtoul(value, &value, 0);
                        }
                } else if (strnicmp(data, "sockopt", 5) == 0) {
-                       if (value && *value) {
-                               vol->sockopt =
-                                       simple_strtoul(value, &value, 0);
+                       if (!value || !*value) {
+                               cERROR(1, ("no socket option specified"));
+                               continue;
+                       } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+                               vol->sockopt_tcp_nodelay = 1;
                        }
                } else if (strnicmp(data, "netbiosname", 4) == 0) {
                        if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 
        tcp_ses->noblocksnd = volume_info->noblocksnd;
        tcp_ses->noautotune = volume_info->noautotune;
+       tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
        atomic_set(&tcp_ses->inFlight, 0);
        init_waitqueue_head(&tcp_ses->response_q);
        init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
 ipv4_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
+       int val;
        bool connected = false;
        __be16 orig_port = 0;
        struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
                        socket->sk->sk_rcvbuf = 140 * 1024;
        }
 
+       if (server->tcp_nodelay) {
+               val = 1;
+               rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+                               (char *)&val, sizeof(val));
+               if (rc)
+                       cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+       }
+
         cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
                 socket->sk->sk_sndbuf,
                 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
 ipv6_connect(struct TCP_Server_Info *server)
 {
        int rc = 0;
+       int val;
        bool connected = false;
        __be16 orig_port = 0;
        struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
         */
        socket->sk->sk_rcvtimeo = 7 * HZ;
        socket->sk->sk_sndtimeo = 5 * HZ;
+
+       if (server->tcp_nodelay) {
+               val = 1;
+               rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+                               (char *)&val, sizeof(val));
+               if (rc)
+                       cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+       }
+
        server->ssocket = socket;
 
        return rc;
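
Both connect paths now honour the new tcp_nodelay flag by calling kernel_setsockopt(SOL_TCP, TCP_NODELAY) on the freshly created socket. The userspace equivalent is an ordinary setsockopt(); a small sketch (the socket here is created but never connected, which is enough to set the option):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Disable Nagle's algorithm on a TCP socket; IPPROTO_TCP is the portable
 * spelling of the SOL_TCP level used by the kernel code above. */
static int enable_tcp_nodelay(int fd)
{
        int val = 1;
        int rc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));

        if (rc)
                perror("setsockopt(TCP_NODELAY)");
        return rc;
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        return enable_tcp_nodelay(fd) ? 1 : 0;
}
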
index 057e1da..3d8f8a9 100644 (file)
@@ -2289,9 +2289,9 @@ cifs_oplock_break(struct slow_work *work)
        if (inode && S_ISREG(inode->i_mode)) {
 #ifdef CONFIG_CIFS_EXPERIMENTAL
                if (cinode->clientCanCacheAll == 0)
-                       break_lease(inode, FMODE_READ);
+                       break_lease(inode, O_RDONLY);
                else if (cinode->clientCanCacheRead == 0)
-                       break_lease(inode, FMODE_WRITE);
+                       break_lease(inode, O_WRONLY);
 #endif
                rc = filemap_fdatawrite(inode->i_mapping);
                if (cinode->clientCanCacheRead == 0) {
index cf18ee7..e3fda97 100644 (file)
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        }
 
-       if (!rc)
+       if (!rc) {
                rc = inode_setattr(inode, attrs);
+
+               /* force revalidate when any of these times are set since some
+                  of the fs types (eg ext3, fat) do not have fine enough
+                  of the fs types (e.g. ext3, fat) do not have fine enough
+                  time granularity to match protocol, and we do not have
+                  a way (yet) to query the server fs's time granularity (and
+               */
+               if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
+                       cifsInode->time = 0;
+       }
 out:
        kfree(args);
        kfree(full_path);
index f84062f..c343b14 100644 (file)
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        cFYI(1, ("For %s", name->name));
 
+       if (parent->d_op && parent->d_op->d_hash)
+               parent->d_op->d_hash(parent, name);
+       else
+               name->hash = full_name_hash(name->name, name->len);
+
        dentry = d_lookup(parent, name);
        if (dentry) {
                /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
                                           min(len, max_len), nlt,
                                           cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
+               pqst->len -= nls_nullsize(nlt);
        } else {
                pqst->name = filename;
                pqst->len = len;
        }
-       pqst->hash = full_name_hash(pqst->name, pqst->len);
-/*     cFYI(1, ("filldir on %s",pqst->name));  */
        return rc;
 }
 
index 7085a62..aaa9c1c 100644 (file)
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
                /* null user mount */
                *bcc_ptr = 0;
                *(bcc_ptr+1) = 0;
-       } else { /* 300 should be long enough for any conceivable user name */
+       } else {
                bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
-                                         300, nls_cp);
+                                         MAX_USERNAME_SIZE, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
        /* copy user */
        if (ses->userName == NULL) {
                /* BB what about null user mounts - check that we do this BB */
-       } else { /* 300 should be long enough for any conceivable user name */
-               strncpy(bcc_ptr, ses->userName, 300);
+       } else {
+               strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
        }
-       /* BB improve check for overflow */
-       bcc_ptr += strnlen(ses->userName, 300);
+       bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
index c5c45de..0ca9ec4 100644 (file)
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
        u32 data;
        void __user *dxferp;
        int err;
+       int interface_id;
+
+       if (get_user(interface_id, &sgio32->interface_id))
+               return -EFAULT;
+       if (interface_id != 'S')
+               return sys_ioctl(fd, cmd, (unsigned long)sgio32);
 
        if (get_user(iovec_count, &sgio32->iovec_count))
                return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
 COMPATIBLE_IOCTL(TIOCLINUX)
 COMPATIBLE_IOCTL(TIOCSBRK)
 COMPATIBLE_IOCTL(TIOCCBRK)
+COMPATIBLE_IOCTL(TIOCGSID)
 COMPATIBLE_IOCTL(TIOCGICOUNT)
 /* Little t */
 COMPATIBLE_IOCTL(TIOCGETD)
@@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
 #ifdef CONFIG_BLOCK
 /* loop */
 IGNORE_IOCTL(LOOP_CLR_FD)
+/* md calls this on random blockdevs */
+IGNORE_IOCTL(RAID_VERSION)
 /* SG stuff */
 COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
 COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
index 953173a..f1358e5 100644 (file)
@@ -257,6 +257,7 @@ kill_it:
        if (dentry)
                goto repeat;
 }
+EXPORT_SYMBOL(dput);
 
 /**
  * d_invalidate - invalidate a dentry
@@ -314,6 +315,7 @@ int d_invalidate(struct dentry * dentry)
        spin_unlock(&dcache_lock);
        return 0;
 }
+EXPORT_SYMBOL(d_invalidate);
 
 /* This should be called _only_ with dcache_lock held */
 
@@ -328,6 +330,7 @@ struct dentry * dget_locked(struct dentry *dentry)
 {
        return __dget_locked(dentry);
 }
+EXPORT_SYMBOL(dget_locked);
 
 /**
  * d_find_alias - grab a hashed alias of inode
@@ -384,6 +387,7 @@ struct dentry * d_find_alias(struct inode *inode)
        }
        return de;
 }
+EXPORT_SYMBOL(d_find_alias);
 
 /*
  *     Try to kill dentries associated with this inode.
@@ -408,6 +412,7 @@ restart:
        }
        spin_unlock(&dcache_lock);
 }
+EXPORT_SYMBOL(d_prune_aliases);
 
 /*
  * Throw away a dentry - free the inode, dput the parent.  This requires that
@@ -610,6 +615,7 @@ void shrink_dcache_sb(struct super_block * sb)
 {
        __shrink_dcache_sb(sb, NULL, 0);
 }
+EXPORT_SYMBOL(shrink_dcache_sb);
 
 /*
  * destroy a single subtree of dentries for unmount
@@ -792,6 +798,7 @@ positive:
        spin_unlock(&dcache_lock);
        return 1;
 }
+EXPORT_SYMBOL(have_submounts);
 
 /*
  * Search the dentry child list for the specified parent,
@@ -876,6 +883,7 @@ void shrink_dcache_parent(struct dentry * parent)
        while ((found = select_parent(parent)) != 0)
                __shrink_dcache_sb(sb, &found, 0);
 }
+EXPORT_SYMBOL(shrink_dcache_parent);
 
 /*
  * Scan `nr' dentries and return the number which remain.
@@ -968,6 +976,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 
        return dentry;
 }
+EXPORT_SYMBOL(d_alloc);
 
 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
 {
@@ -1012,6 +1021,7 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
        spin_unlock(&dcache_lock);
        security_d_instantiate(entry, inode);
 }
+EXPORT_SYMBOL(d_instantiate);
 
 /**
  * d_instantiate_unique - instantiate a non-aliased dentry
@@ -1108,6 +1118,7 @@ struct dentry * d_alloc_root(struct inode * root_inode)
        }
        return res;
 }
+EXPORT_SYMBOL(d_alloc_root);
 
 static inline struct hlist_head *d_hash(struct dentry *parent,
                                        unsigned long hash)
@@ -1211,7 +1222,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                        BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
                        spin_unlock(&dcache_lock);
                        security_d_instantiate(new, inode);
-                       d_rehash(dentry);
                        d_move(new, dentry);
                        iput(inode);
                } else {
@@ -1225,6 +1235,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                d_add(dentry, inode);
        return new;
 }
+EXPORT_SYMBOL(d_splice_alias);
 
 /**
  * d_add_ci - lookup or allocate new dentry with case-exact name
@@ -1314,6 +1325,7 @@ err_out:
        iput(inode);
        return ERR_PTR(error);
 }
+EXPORT_SYMBOL(d_add_ci);
 
 /**
  * d_lookup - search for a dentry
@@ -1357,6 +1369,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
        } while (read_seqretry(&rename_lock, seq));
        return dentry;
 }
+EXPORT_SYMBOL(d_lookup);
 
 struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 {
@@ -1483,6 +1496,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
 out:
        return 0;
 }
+EXPORT_SYMBOL(d_validate);
 
 /*
  * When a file is deleted, we have two options:
@@ -1528,6 +1542,7 @@ void d_delete(struct dentry * dentry)
 
        fsnotify_nameremove(dentry, isdir);
 }
+EXPORT_SYMBOL(d_delete);
 
 static void __d_rehash(struct dentry * entry, struct hlist_head *list)
 {
@@ -1556,6 +1571,7 @@ void d_rehash(struct dentry * entry)
        spin_unlock(&entry->d_lock);
        spin_unlock(&dcache_lock);
 }
+EXPORT_SYMBOL(d_rehash);
 
 /*
  * When switching names, the actual string doesn't strictly have to
@@ -1702,6 +1718,7 @@ void d_move(struct dentry * dentry, struct dentry * target)
        d_move_locked(dentry, target);
        spin_unlock(&dcache_lock);
 }
+EXPORT_SYMBOL(d_move);
 
 /**
  * d_ancestor - search for an ancestor
@@ -1868,6 +1885,7 @@ shouldnt_be_hashed:
        spin_unlock(&dcache_lock);
        BUG();
 }
+EXPORT_SYMBOL_GPL(d_materialise_unique);
 
 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
 {
@@ -2005,6 +2023,7 @@ char *d_path(const struct path *path, char *buf, int buflen)
        path_put(&root);
        return res;
 }
+EXPORT_SYMBOL(d_path);
 
 /*
  * Helper function for dentry_operations.d_dname() members
@@ -2171,6 +2190,30 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
        return result;
 }
 
+int path_is_under(struct path *path1, struct path *path2)
+{
+       struct vfsmount *mnt = path1->mnt;
+       struct dentry *dentry = path1->dentry;
+       int res;
+       spin_lock(&vfsmount_lock);
+       if (mnt != path2->mnt) {
+               for (;;) {
+                       if (mnt->mnt_parent == mnt) {
+                               spin_unlock(&vfsmount_lock);
+                               return 0;
+                       }
+                       if (mnt->mnt_parent == path2->mnt)
+                               break;
+                       mnt = mnt->mnt_parent;
+               }
+               dentry = mnt->mnt_mountpoint;
+       }
+       res = is_subdir(dentry, path2->dentry);
+       spin_unlock(&vfsmount_lock);
+       return res;
+}
+EXPORT_SYMBOL(path_is_under);
+
 void d_genocide(struct dentry *root)
 {
        struct dentry *this_parent = root;
@@ -2228,6 +2271,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
        }
        return ino;
 }
+EXPORT_SYMBOL(find_inode_number);
 
 static __initdata unsigned long dhash_entries;
 static int __init set_dhash_entries(char *str)
@@ -2297,6 +2341,7 @@ static void __init dcache_init(void)
 
 /* SLAB cache for __getname() consumers */
 struct kmem_cache *names_cachep __read_mostly;
+EXPORT_SYMBOL(names_cachep);
 
 EXPORT_SYMBOL(d_genocide);
 
@@ -2326,26 +2371,3 @@ void __init vfs_caches_init(unsigned long mempages)
        bdev_cache_init();
        chrdev_init();
 }
-
-EXPORT_SYMBOL(d_alloc);
-EXPORT_SYMBOL(d_alloc_root);
-EXPORT_SYMBOL(d_delete);
-EXPORT_SYMBOL(d_find_alias);
-EXPORT_SYMBOL(d_instantiate);
-EXPORT_SYMBOL(d_invalidate);
-EXPORT_SYMBOL(d_lookup);
-EXPORT_SYMBOL(d_move);
-EXPORT_SYMBOL_GPL(d_materialise_unique);
-EXPORT_SYMBOL(d_path);
-EXPORT_SYMBOL(d_prune_aliases);
-EXPORT_SYMBOL(d_rehash);
-EXPORT_SYMBOL(d_splice_alias);
-EXPORT_SYMBOL(d_add_ci);
-EXPORT_SYMBOL(d_validate);
-EXPORT_SYMBOL(dget_locked);
-EXPORT_SYMBOL(dput);
-EXPORT_SYMBOL(find_inode_number);
-EXPORT_SYMBOL(have_submounts);
-EXPORT_SYMBOL(names_cachep);
-EXPORT_SYMBOL(shrink_dcache_parent);
-EXPORT_SYMBOL(shrink_dcache_sb);
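
path_is_under() above walks path1's mount up through mnt_parent until it either reaches path2's mount, or hits a self-parented mount (the root) and gives up, and only then compares dentries with is_subdir(). A toy sketch of just that parent walk; toy_mount is an illustrative stand-in, not the kernel's vfsmount:

#include <stdbool.h>
#include <stdio.h>

struct toy_mount {
        struct toy_mount *parent;       /* the root points to itself, like mnt_parent */
        const char *name;
};

/* Return true if 'mnt' is 'target' or lies somewhere below it. */
static bool mount_is_under(struct toy_mount *mnt, struct toy_mount *target)
{
        for (;;) {
                if (mnt == target)
                        return true;
                if (mnt->parent == mnt) /* reached the root without finding target */
                        return false;
                mnt = mnt->parent;
        }
}

int main(void)
{
        struct toy_mount root = { .parent = &root, .name = "/" };
        struct toy_mount home = { .parent = &root, .name = "/home" };
        struct toy_mount usb  = { .parent = &home, .name = "/home/usb" };

        printf("%d %d\n", mount_is_under(&usb, &root), mount_is_under(&home, &usb));
        return 0;
}
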
index 0790a10..cce6bbd 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
+       unsigned long stack_size;
+       unsigned long stack_expand;
+       unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size to 1GB */
@@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
                        goto out_unlock;
        }
 
+       stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       stack_size = vma->vm_end - vma->vm_start;
+       /*
+        * Align this down to a page boundary as expand_stack
+        * will align it up.
+        */
+       rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
 #ifdef CONFIG_STACK_GROWSUP
-       stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       if (stack_size + stack_expand > rlim_stack)
+               stack_base = vma->vm_start + rlim_stack;
+       else
+               stack_base = vma->vm_end + stack_expand;
 #else
-       stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       if (stack_size + stack_expand > rlim_stack)
+               stack_base = vma->vm_end - rlim_stack;
+       else
+               stack_base = vma->vm_start - stack_expand;
 #endif
        ret = expand_stack(vma, stack_base);
        if (ret)
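
The hunk above stops blindly reserving EXTRA_STACK_VM_PAGES and instead clamps the initial expansion so that the existing argument area plus the slack never exceeds RLIMIT_STACK. The grows-down arithmetic can be checked in isolation; the addresses, slack and limit in this sketch are made-up stand-ins, not values taken from the patch:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Grows-down case: pick the new lower end of the stack VMA, either expanded
 * by 'stack_expand' or clamped so the total stays within the rlimit. */
static unsigned long stack_base_growsdown(unsigned long vm_start,
                                          unsigned long vm_end,
                                          unsigned long stack_expand,
                                          unsigned long rlim_stack)
{
        unsigned long stack_size = vm_end - vm_start;

        rlim_stack &= PAGE_MASK;        /* expand_stack() aligns up, so align down here */
        if (stack_size + stack_expand > rlim_stack)
                return vm_end - rlim_stack;
        return vm_start - stack_expand;
}

int main(void)
{
        /* Hypothetical VMA: 16 KiB of arguments, 20 pages of slack, 8 MiB rlimit. */
        unsigned long base = stack_base_growsdown(0x7fff0000UL, 0x7fff4000UL,
                                                  20 * PAGE_SIZE, 8UL << 20);

        printf("stack_base = %#lx\n", base);
        return 0;
}
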
index 9630583..56eee3d 100644 (file)
@@ -116,11 +116,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
-               path.mnt = mnt->mnt_parent;
-               path.dentry = mnt->mnt_mountpoint;
-               path_get(&path);
+               path.mnt = mnt;
+               path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
-               path_put(&path);
                if (!IS_ERR(cp)) {
                        memcpy(sbi->s_es->s_last_mounted, cp,
                               sizeof(sbi->s_es->s_last_mounted));
index 6d47379..583e823 100644 (file)
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
                                *ptr++ = cpu_to_be64(bn++);
                        break;
                }
-       } while (state != ALLOC_DATA);
+       } while ((state != ALLOC_DATA) || !dblock);
 
        ip->i_height = height;
        gfs2_add_inode_blocks(&ip->i_inode, alloced);
index 8a102f7..a86ed63 100644 (file)
@@ -725,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
                goto fail;
        }
 
-       error = -EINVAL;
+       error = -EUSERS;
        if (!gfs2_jindex_size(sdp)) {
                fs_err(sdp, "no journals!\n");
                goto fail_jindex;
index 84350e1..4e64352 100644 (file)
@@ -976,122 +976,62 @@ out:
 }
 
 /**
- * gfs2_readlinki - return the contents of a symlink
- * @ip: the symlink's inode
- * @buf: a pointer to the buffer to be filled
- * @len: a pointer to the length of @buf
+ * gfs2_follow_link - Follow a symbolic link
+ * @dentry: The dentry of the link
+ * @nd: Data that we pass to vfs_follow_link()
  *
- * If @buf is too small, a piece of memory is kmalloc()ed and needs
- * to be freed by the caller.
+ * This can handle symlinks of any size.
  *
- * Returns: errno
+ * Returns: 0 on success or error code
  */
 
-static int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
+static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
+       struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
        struct gfs2_holder i_gh;
        struct buffer_head *dibh;
        unsigned int x;
+       char *buf;
        int error;
 
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
        error = gfs2_glock_nq(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
-               return error;
+               nd_set_link(nd, ERR_PTR(error));
+               return NULL;
        }
 
        if (!ip->i_disksize) {
                gfs2_consist_inode(ip);
-               error = -EIO;
+               buf = ERR_PTR(-EIO);
                goto out;
        }
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
-       if (error)
+       if (error) {
+               buf = ERR_PTR(error);
                goto out;
-
-       x = ip->i_disksize + 1;
-       if (x > *len) {
-               *buf = kmalloc(x, GFP_NOFS);
-               if (!*buf) {
-                       error = -ENOMEM;
-                       goto out_brelse;
-               }
        }
 
-       memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
-       *len = x;
-
-out_brelse:
+       x = ip->i_disksize + 1;
+       buf = kmalloc(x, GFP_NOFS);
+       if (!buf)
+               buf = ERR_PTR(-ENOMEM);
+       else
+               memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
        brelse(dibh);
 out:
        gfs2_glock_dq_uninit(&i_gh);
-       return error;
-}
-
-/**
- * gfs2_readlink - Read the value of a symlink
- * @dentry: the symlink
- * @buf: the buffer to read the symlink data into
- * @size: the size of the buffer
- *
- * Returns: errno
- */
-
-static int gfs2_readlink(struct dentry *dentry, char __user *user_buf,
-                        int user_size)
-{
-       struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
-       char array[GFS2_FAST_NAME_SIZE], *buf = array;
-       unsigned int len = GFS2_FAST_NAME_SIZE;
-       int error;
-
-       error = gfs2_readlinki(ip, &buf, &len);
-       if (error)
-               return error;
-
-       if (user_size > len - 1)
-               user_size = len - 1;
-
-       if (copy_to_user(user_buf, buf, user_size))
-               error = -EFAULT;
-       else
-               error = user_size;
-
-       if (buf != array)
-               kfree(buf);
-
-       return error;
+       nd_set_link(nd, buf);
+       return NULL;
 }
 
-/**
- * gfs2_follow_link - Follow a symbolic link
- * @dentry: The dentry of the link
- * @nd: Data that we pass to vfs_follow_link()
- *
- * This can handle symlinks of any size. It is optimised for symlinks
- * under GFS2_FAST_NAME_SIZE.
- *
- * Returns: 0 on success or error code
- */
-
-static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
-       struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
-       char array[GFS2_FAST_NAME_SIZE], *buf = array;
-       unsigned int len = GFS2_FAST_NAME_SIZE;
-       int error;
-
-       error = gfs2_readlinki(ip, &buf, &len);
-       if (!error) {
-               error = vfs_follow_link(nd, buf);
-               if (buf != array)
-                       kfree(buf);
-       } else
-               path_put(&nd->path);
-
-       return ERR_PTR(error);
+       char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
 }
 
 /**
@@ -1426,8 +1366,9 @@ const struct inode_operations gfs2_dir_iops = {
 };
 
 const struct inode_operations gfs2_symlink_iops = {
-       .readlink = gfs2_readlink,
+       .readlink = generic_readlink,
        .follow_link = gfs2_follow_link,
+       .put_link = gfs2_put_link,
        .permission = gfs2_permission,
        .setattr = gfs2_setattr,
        .getattr = gfs2_getattr,
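
With the change above, gfs2_follow_link() hands a kmalloc'd buffer (or an ERR_PTR) to nd_set_link() and the new gfs2_put_link() frees it once the VFS has finished with it. A minimal userspace sketch of that ownership hand-off, using a made-up link_slot in place of struct nameidata:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for nd_set_link()/nd_get_link(): one slot whose buffer is owned
 * by whoever set it until the paired put releases it. */
struct link_slot {
        char *link;
};

static void follow_link(struct link_slot *nd)
{
        char *buf = malloc(32);

        if (!buf) {
                nd->link = NULL;        /* real code would store ERR_PTR(-ENOMEM) */
                return;
        }
        strcpy(buf, "/target/of/symlink");
        nd->link = buf;                 /* hand ownership to the slot */
}

static void put_link(struct link_slot *nd)
{
        free(nd->link);                 /* freed only after the caller is done */
        nd->link = NULL;
}

int main(void)
{
        struct link_slot nd = { NULL };

        follow_link(&nd);
        if (nd.link)
                printf("link -> %s\n", nd.link);
        put_link(&nd);
        return 0;
}
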
index 1aa88c4..6a2f04b 100644 (file)
@@ -353,7 +353,7 @@ int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
 }
 
 int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
-            unsigned len, char *buf)
+            unsigned len, const char *buf)
 {
        struct buffer_head *bh;
        char *data;
index 940d6d1..67d9d36 100644 (file)
@@ -20,8 +20,8 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
 
        if (l == 1) if (qstr->name[0]=='.') goto x;
        if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
-       hpfs_adjust_length((char *)qstr->name, &l);
-       /*if (hpfs_chk_name((char *)qstr->name,&l))*/
+       hpfs_adjust_length(qstr->name, &l);
+       /*if (hpfs_chk_name(qstr->name,&l))*/
                /*return -ENAMETOOLONG;*/
                /*return -ENOENT;*/
        x:
@@ -38,14 +38,16 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
 {
        unsigned al=a->len;
        unsigned bl=b->len;
-       hpfs_adjust_length((char *)a->name, &al);
-       /*hpfs_adjust_length((char *)b->name, &bl);*/
+       hpfs_adjust_length(a->name, &al);
+       /*hpfs_adjust_length(b->name, &bl);*/
        /* 'a' is the qstr of an already existing dentry, so the name
         * must be valid. 'b' must be validated first.
         */
 
-       if (hpfs_chk_name((char *)b->name, &bl)) return 1;
-       if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1;
+       if (hpfs_chk_name(b->name, &bl))
+               return 1;
+       if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0))
+               return 1;
        return 0;
 }
 
index 8865c94..26e3964 100644 (file)
@@ -59,7 +59,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct hpfs_dirent *de;
        int lc;
        long old_pos;
-       char *tempname;
+       unsigned char *tempname;
        int c1, c2 = 0;
        int ret = 0;
 
@@ -158,11 +158,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
                if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
                        filp->f_pos = old_pos;
-                       if (tempname != (char *)de->name) kfree(tempname);
+                       if (tempname != de->name) kfree(tempname);
                        hpfs_brelse4(&qbh);
                        goto out;
                }
-               if (tempname != (char *)de->name) kfree(tempname);
+               if (tempname != de->name) kfree(tempname);
                hpfs_brelse4(&qbh);
        }
 out:
@@ -187,7 +187,7 @@ out:
 
 struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct quad_buffer_head qbh;
        struct hpfs_dirent *de;
@@ -197,7 +197,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
        struct hpfs_inode_info *hpfs_result;
 
        lock_kernel();
-       if ((err = hpfs_chk_name((char *)name, &len))) {
+       if ((err = hpfs_chk_name(name, &len))) {
                if (err == -ENAMETOOLONG) {
                        unlock_kernel();
                        return ERR_PTR(-ENAMETOOLONG);
@@ -209,7 +209,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
         * '.' and '..' will never be passed here.
         */
 
-       de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh);
+       de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh);
 
        /*
         * This is not really a bailout, just means file not found.
@@ -250,7 +250,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
        hpfs_result = hpfs_i(result);
        if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
 
-       hpfs_decide_conv(result, (char *)name, len);
+       hpfs_decide_conv(result, name, len);
 
        if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
                hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
index fe83c2b..9b2ffad 100644 (file)
@@ -158,7 +158,8 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
 
 /* Add an entry to dnode and don't care if it grows over 2048 bytes */
 
-struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name,
+struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
+                               const unsigned char *name,
                                unsigned namelen, secno down_ptr)
 {
        struct hpfs_dirent *de;
@@ -223,7 +224,7 @@ static void fix_up_ptrs(struct super_block *s, struct dnode *d)
 /* Add an entry to dnode and do dnode splitting if required */
 
 static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
-                            unsigned char *name, unsigned namelen,
+                            const unsigned char *name, unsigned namelen,
                             struct hpfs_dirent *new_de, dnode_secno down_ptr)
 {
        struct quad_buffer_head qbh, qbh1, qbh2;
@@ -231,7 +232,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
        dnode_secno adno, rdno;
        struct hpfs_dirent *de;
        struct hpfs_dirent nde;
-       char *nname;
+       unsigned char *nname;
        int h;
        int pos;
        struct buffer_head *bh;
@@ -305,7 +306,9 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
                pos++;
        }
        copy_de(new_de = &nde, de);
-       memcpy(name = nname, de->name, namelen = de->namelen);
+       memcpy(nname, de->name, de->namelen);
+       name = nname;
+       namelen = de->namelen;
        for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4);
        down_ptr = adno;
        set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
@@ -368,7 +371,8 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
  * I hope, now it's finally bug-free.
  */
 
-int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen,
+int hpfs_add_dirent(struct inode *i,
+                   const unsigned char *name, unsigned namelen,
                    struct hpfs_dirent *new_de, int cdepth)
 {
        struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
@@ -897,7 +901,8 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
 
 /* Find a dirent in tree */
 
-struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len,
+struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno,
+                              const unsigned char *name, unsigned len,
                               dnode_secno *dd, struct quad_buffer_head *qbh)
 {
        struct dnode *dnode;
@@ -988,8 +993,8 @@ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno)
 struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                                     struct fnode *f, struct quad_buffer_head *qbh)
 {
-       char *name1;
-       char *name2;
+       unsigned char *name1;
+       unsigned char *name2;
        int name1len, name2len;
        struct dnode *d;
        dnode_secno dno, downd;
index 547a838..45e53d9 100644 (file)
@@ -62,8 +62,8 @@ static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
        return ret;
 }
 
-static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data,
-                           int size)
+static void set_indirect_ea(struct super_block *s, int ano, secno a,
+                           const char *data, int size)
 {
        hpfs_ea_write(s, a, ano, 0, size, data);
 }
@@ -186,7 +186,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
  * This driver can't change sizes of eas ('cause I just don't need it).
  */
 
-void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size)
+void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
+                const char *data, int size)
 {
        fnode_secno fno = inode->i_ino;
        struct super_block *s = inode->i_sb;
index 701ca54..97bf738 100644 (file)
@@ -215,7 +215,7 @@ secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_heade
 secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
 void hpfs_remove_btree(struct super_block *, struct bplus_header *);
 int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
-int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *);
+int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *);
 void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
 void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
 void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
@@ -244,13 +244,17 @@ extern const struct file_operations hpfs_dir_ops;
 
 void hpfs_add_pos(struct inode *, loff_t *);
 void hpfs_del_pos(struct inode *, loff_t *);
-struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno);
-int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int);
+struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
+                               const unsigned char *, unsigned, secno);
+int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
+                   struct hpfs_dirent *, int);
 int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
 void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
 dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
 struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
-struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *);
+struct hpfs_dirent *map_dirent(struct inode *, dnode_secno,
+                              const unsigned char *, unsigned, dnode_secno *,
+                              struct quad_buffer_head *);
 void hpfs_remove_dtree(struct super_block *, dnode_secno);
 struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);
 
@@ -259,7 +263,8 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct f
 void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
 int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
 char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
-void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int);
+void hpfs_set_ea(struct inode *, struct fnode *, const char *,
+                const char *, int);
 
 /* file.c */
 
@@ -282,7 +287,7 @@ void hpfs_delete_inode(struct inode *);
 
 unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
 unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
-char *hpfs_load_code_page(struct super_block *, secno);
+unsigned char *hpfs_load_code_page(struct super_block *, secno);
 secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
@@ -292,12 +297,13 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);
 /* name.c */
 
 unsigned char hpfs_upcase(unsigned char *, unsigned char);
-int hpfs_chk_name(unsigned char *, unsigned *);
-char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
-int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int);
-int hpfs_is_name_long(unsigned char *, unsigned);
-void hpfs_adjust_length(unsigned char *, unsigned *);
-void hpfs_decide_conv(struct inode *, unsigned char *, unsigned);
+int hpfs_chk_name(const unsigned char *, unsigned *);
+unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
+int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
+                      const unsigned char *, unsigned, int);
+int hpfs_is_name_long(const unsigned char *, unsigned);
+void hpfs_adjust_length(const unsigned char *, unsigned *);
+void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
 
 /* namei.c */
 
index fe703ae..ff90aff 100644 (file)
@@ -46,7 +46,7 @@ void hpfs_read_inode(struct inode *i)
        struct fnode *fnode;
        struct super_block *sb = i->i_sb;
        struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
-       unsigned char *ea;
+       void *ea;
        int ea_size;
 
        if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
@@ -112,7 +112,7 @@ void hpfs_read_inode(struct inode *i)
                }
        }
        if (fnode->dirflag) {
-               unsigned n_dnodes, n_subdirs;
+               int n_dnodes, n_subdirs;
                i->i_mode |= S_IFDIR;
                i->i_op = &hpfs_dir_iops;
                i->i_fop = &hpfs_dir_ops;
index c472458..840d033 100644 (file)
@@ -35,7 +35,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
  * lowercasing table
  */
 
-char *hpfs_load_code_page(struct super_block *s, secno cps)
+unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
 {
        struct buffer_head *bh;
        secno cpds;
@@ -71,7 +71,7 @@ char *hpfs_load_code_page(struct super_block *s, secno cps)
                brelse(bh);
                return NULL;
        }
-       ptr = (char *)cpd + cpd->offs[cpi] + 6;
+       ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6;
        if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
                printk("HPFS: out of memory for code page table\n");
                brelse(bh);
@@ -217,7 +217,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
        if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
                if (hpfs_sb(s)->sb_chk) {
                        unsigned p, pp = 0;
-                       unsigned char *d = (char *)dnode;
+                       unsigned char *d = (unsigned char *)dnode;
                        int b = 0;
                        if (dnode->magic != DNODE_MAGIC) {
                                hpfs_error(s, "bad magic on dnode %08x", secno);
index 1f4a964..f24736d 100644 (file)
@@ -8,16 +8,16 @@
 
 #include "hpfs_fn.h"
 
-static char *text_postfix[]={
+static const char *text_postfix[]={
 ".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
 ".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
 ".RC", ".TEX", ".TXT", ".Y", ""};
 
-static char *text_prefix[]={
+static const char *text_prefix[]={
 "AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
 "MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
 
-void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len)
+void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
 {
        struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
        int i;
@@ -71,7 +71,7 @@ static inline unsigned char locase(unsigned char *dir, unsigned char a)
        return dir[a];
 }
 
-int hpfs_chk_name(unsigned char *name, unsigned *len)
+int hpfs_chk_name(const unsigned char *name, unsigned *len)
 {
        int i;
        if (*len > 254) return -ENAMETOOLONG;
@@ -83,10 +83,10 @@ int hpfs_chk_name(unsigned char *name, unsigned *len)
        return 0;
 }
 
-char *hpfs_translate_name(struct super_block *s, unsigned char *from,
+unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from,
                          unsigned len, int lc, int lng)
 {
-       char *to;
+       unsigned char *to;
        int i;
        if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
                printk("HPFS: Long name flag mismatch - name ");
@@ -103,8 +103,9 @@ char *hpfs_translate_name(struct super_block *s, unsigned char *from,
        return to;
 }
 
-int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
-                      unsigned char *n2, unsigned l2, int last)
+int hpfs_compare_names(struct super_block *s,
+                      const unsigned char *n1, unsigned l1,
+                      const unsigned char *n2, unsigned l2, int last)
 {
        unsigned l = l1 < l2 ? l1 : l2;
        unsigned i;
@@ -120,7 +121,7 @@ int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
        return 0;
 }
 
-int hpfs_is_name_long(unsigned char *name, unsigned len)
+int hpfs_is_name_long(const unsigned char *name, unsigned len)
 {
        int i,j;
        for (i = 0; i < len && name[i] != '.'; i++)
@@ -134,7 +135,7 @@ int hpfs_is_name_long(unsigned char *name, unsigned len)
 
 /* OS/2 clears dots and spaces at the end of file name, so we have to */
 
-void hpfs_adjust_length(unsigned char *name, unsigned *len)
+void hpfs_adjust_length(const unsigned char *name, unsigned *len)
 {
        if (!*len) return;
        if (*len == 1 && name[0] == '.') return;
index 82b9c4b..11c2b40 100644 (file)
@@ -11,7 +11,7 @@
 
 static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct quad_buffer_head qbh0;
        struct buffer_head *bh;
@@ -24,7 +24,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        int r;
        struct hpfs_dirent dee;
        int err;
-       if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+       if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
        lock_kernel();
        err = -ENOSPC;
        fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -62,7 +62,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                result->i_mode &= ~0222;
 
        mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee, 0);
        if (r == 1)
                goto bail3;
        if (r == -1) {
@@ -121,7 +121,7 @@ bail:
 
 static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct inode *result = NULL;
        struct buffer_head *bh;
@@ -130,7 +130,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        int r;
        struct hpfs_dirent dee;
        int err;
-       if ((err = hpfs_chk_name((char *)name, &len)))
+       if ((err = hpfs_chk_name(name, &len)))
                return err==-ENOENT ? -EINVAL : err;
        lock_kernel();
        err = -ENOSPC;
@@ -155,7 +155,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        result->i_op = &hpfs_file_iops;
        result->i_fop = &hpfs_file_ops;
        result->i_nlink = 1;
-       hpfs_decide_conv(result, (char *)name, len);
+       hpfs_decide_conv(result, name, len);
        hpfs_i(result)->i_parent_dir = dir->i_ino;
        result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
        result->i_ctime.tv_nsec = 0;
@@ -170,7 +170,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        hpfs_i(result)->mmu_private = 0;
 
        mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee, 0);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -211,7 +211,7 @@ bail:
 
 static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct buffer_head *bh;
        struct fnode *fnode;
@@ -220,7 +220,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        struct hpfs_dirent dee;
        struct inode *result = NULL;
        int err;
-       if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+       if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
        if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -256,7 +256,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        init_special_inode(result, mode, rdev);
 
        mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee, 0);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -289,7 +289,7 @@ bail:
 
 static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct buffer_head *bh;
        struct fnode *fnode;
@@ -298,7 +298,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        struct hpfs_dirent dee;
        struct inode *result;
        int err;
-       if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err;
+       if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
        lock_kernel();
        if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
                unlock_kernel();
@@ -335,7 +335,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        result->i_data.a_ops = &hpfs_symlink_aops;
 
        mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee, 0);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -345,7 +345,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
        fnode->up = dir->i_ino;
-       hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink));
+       hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
        mark_buffer_dirty(bh);
        brelse(bh);
 
@@ -369,7 +369,7 @@ bail:
 
 static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct quad_buffer_head qbh;
        struct hpfs_dirent *de;
@@ -381,12 +381,12 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
        int err;
 
        lock_kernel();
-       hpfs_adjust_length((char *)name, &len);
+       hpfs_adjust_length(name, &len);
 again:
        mutex_lock(&hpfs_i(inode)->i_parent_mutex);
        mutex_lock(&hpfs_i(dir)->i_mutex);
        err = -ENOENT;
-       de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+       de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
                goto out;
 
@@ -413,22 +413,25 @@ again:
 
                mutex_unlock(&hpfs_i(dir)->i_mutex);
                mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
-               d_drop(dentry);
-               spin_lock(&dentry->d_lock);
-               if (atomic_read(&dentry->d_count) > 1 ||
-                   generic_permission(inode, MAY_WRITE, NULL) ||
+               dentry_unhash(dentry);
+               if (!d_unhashed(dentry)) {
+                       dput(dentry);
+                       unlock_kernel();
+                       return -ENOSPC;
+               }
+               if (generic_permission(inode, MAY_WRITE, NULL) ||
                    !S_ISREG(inode->i_mode) ||
                    get_write_access(inode)) {
-                       spin_unlock(&dentry->d_lock);
                        d_rehash(dentry);
+                       dput(dentry);
                } else {
                        struct iattr newattrs;
-                       spin_unlock(&dentry->d_lock);
                        /*printk("HPFS: truncating file before delete.\n");*/
                        newattrs.ia_size = 0;
                        newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
                        err = notify_change(dentry, &newattrs);
                        put_write_access(inode);
+                       dput(dentry);
                        if (!err)
                                goto again;
                }
@@ -451,7 +454,7 @@ out:
 
 static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        unsigned len = dentry->d_name.len;
        struct quad_buffer_head qbh;
        struct hpfs_dirent *de;
@@ -462,12 +465,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
        int err;
        int r;
 
-       hpfs_adjust_length((char *)name, &len);
+       hpfs_adjust_length(name, &len);
        lock_kernel();
        mutex_lock(&hpfs_i(inode)->i_parent_mutex);
        mutex_lock(&hpfs_i(dir)->i_mutex);
        err = -ENOENT;
-       de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
+       de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
                goto out;
 
@@ -546,10 +549,10 @@ const struct address_space_operations hpfs_symlink_aops = {
 static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
 {
-       char *old_name = (char *)old_dentry->d_name.name;
-       int old_len = old_dentry->d_name.len;
-       char *new_name = (char *)new_dentry->d_name.name;
-       int new_len = new_dentry->d_name.len;
+       const unsigned char *old_name = old_dentry->d_name.name;
+       unsigned old_len = old_dentry->d_name.len;
+       const unsigned char *new_name = new_dentry->d_name.name;
+       unsigned new_len = new_dentry->d_name.len;
        struct inode *i = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct quad_buffer_head qbh, qbh1;
@@ -560,9 +563,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct buffer_head *bh;
        struct fnode *fnode;
        int err;
-       if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err;
+       if ((err = hpfs_chk_name(new_name, &new_len))) return err;
        err = 0;
-       hpfs_adjust_length((char *)old_name, &old_len);
+       hpfs_adjust_length(old_name, &old_len);
 
        lock_kernel();
        /* order doesn't matter, due to VFS exclusion */
@@ -579,7 +582,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                goto end1;
        }
 
-       if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+       if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
                hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
                err = -ENOENT;
                goto end1;
@@ -590,7 +593,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (new_inode) {
                int r;
                if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
-                       if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) {
+                       if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) {
                                clear_nlink(new_inode);
                                copy_de(nde, &de);
                                memcpy(nde->name, new_name, new_len);
@@ -618,7 +621,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
        
        if (new_dir == old_dir)
-               if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) {
+               if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
                        hpfs_unlock_creation(i->i_sb);
                        hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
                        err = -ENOENT;
@@ -648,7 +651,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                brelse(bh);
        }
        hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
-       hpfs_decide_conv(i, (char *)new_name, new_len);
+       hpfs_decide_conv(i, new_name, new_len);
 end1:
        if (old_dir != new_dir)
                mutex_unlock(&hpfs_i(new_dir)->i_mutex);
index 7239efc..2e4dfa8 100644 (file)
@@ -718,7 +718,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
        struct vfsmount *proc_mnt;
        int err = -ENOENT;
 
-       proc_mnt = do_kern_mount("proc", 0, "proc", NULL);
+       proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt);
        if (IS_ERR(proc_mnt))
                goto out;
 
index e96a166..8a03a54 100644 (file)
@@ -70,6 +70,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
 
 extern void __init mnt_init(void);
 
+extern spinlock_t vfsmount_lock;
+
 /*
  * fs_struct.c
  */
index 6e8d17e..9e50bcf 100644 (file)
@@ -338,28 +338,14 @@ int simple_readpage(struct file *file, struct page *page)
        return 0;
 }
 
-int simple_prepare_write(struct file *file, struct page *page,
-                       unsigned from, unsigned to)
-{
-       if (!PageUptodate(page)) {
-               if (to - from != PAGE_CACHE_SIZE)
-                       zero_user_segments(page,
-                               0, from,
-                               to, PAGE_CACHE_SIZE);
-       }
-       return 0;
-}
-
 int simple_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
        struct page *page;
        pgoff_t index;
-       unsigned from;
 
        index = pos >> PAGE_CACHE_SHIFT;
-       from = pos & (PAGE_CACHE_SIZE - 1);
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
@@ -367,43 +353,59 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
 
        *pagep = page;
 
-       return simple_prepare_write(file, page, from, from+len);
-}
-
-static int simple_commit_write(struct file *file, struct page *page,
-                              unsigned from, unsigned to)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+       if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+               unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 
-       if (!PageUptodate(page))
-               SetPageUptodate(page);
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold the i_mutex.
-        */
-       if (pos > inode->i_size)
-               i_size_write(inode, pos);
-       set_page_dirty(page);
+               zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+       }
        return 0;
 }
 
+/**
+ * simple_write_end - .write_end helper for non-block-device FSes
+ * @available: See .write_end of address_space_operations
+ * @file:              "
+ * @mapping:           "
+ * @pos:               "
+ * @len:               "
+ * @copied:            "
+ * @page:              "
+ * @fsdata:            "
+ *
+ * simple_write_end does the minimum needed for updating a page after writing is
+ * done. It has the same API signature as the .write_end of
+ * address_space_operations vector. So it can just be set onto .write_end for
+ * FSes that don't need any other processing. i_mutex is assumed to be held.
+ * Block based filesystems should use generic_write_end().
+ * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
+ * is not called, so a filesystem that actually does store data in .write_inode
+ * should extend on what's done here with a call to mark_inode_dirty() in the
+ * case that i_size has changed.
+ */
 int simple_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
 {
-       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       struct inode *inode = page->mapping->host;
+       loff_t last_pos = pos + copied;
 
        /* zero the stale part of the page if we did a short copy */
        if (copied < len) {
-               void *kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + from + copied, 0, len - copied);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+               zero_user(page, from + copied, len - copied);
        }
 
-       simple_commit_write(file, page, from, from+copied);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+       /*
+        * No need to use i_size_read() here, the i_size
+        * cannot change under us because we hold the i_mutex.
+        */
+       if (last_pos > inode->i_size)
+               i_size_write(inode, last_pos);
 
+       set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);
 
@@ -853,7 +855,6 @@ EXPORT_SYMBOL(simple_getattr);
 EXPORT_SYMBOL(simple_link);
 EXPORT_SYMBOL(simple_lookup);
 EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_UNUSED_SYMBOL(simple_prepare_write);
 EXPORT_SYMBOL(simple_readpage);
 EXPORT_SYMBOL(simple_release_fs);
 EXPORT_SYMBOL(simple_rename);
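
For reference, the simple_write_begin()/simple_write_end() rework above lets a small in-memory filesystem wire the generic helpers straight into its address_space_operations now that simple_prepare_write() is gone. The examplefs_ name in this sketch is hypothetical and not part of the patch:

	static const struct address_space_operations examplefs_aops = {
		.readpage	= simple_readpage,
		.write_begin	= simple_write_begin,
		.write_end	= simple_write_end,
	};
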
index a8794f2..ae9ded0 100644 (file)
@@ -1182,8 +1182,9 @@ int __break_lease(struct inode *inode, unsigned int mode)
        struct file_lock *fl;
        unsigned long break_time;
        int i_have_this_lease = 0;
+       int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
-       new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
+       new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
 
        lock_kernel();
 
@@ -1197,7 +1198,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
                if (fl->fl_owner == current->files)
                        i_have_this_lease = 1;
 
-       if (mode & FMODE_WRITE) {
+       if (want_write) {
                /* If we want write access, we have to revoke any lease. */
                future = F_UNLCK | F_INPROGRESS;
        } else if (flock->fl_type & F_INPROGRESS) {
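
The switch from testing FMODE_WRITE to (mode & O_ACCMODE) != O_RDONLY matters because __break_lease() is handed open-mode (O_*) flags, as the nfsd callers later in this series confirm by passing O_WRONLY. A small userspace sketch of the difference; the FMODE_WRITE define mirrors the assumed kernel value and is for illustration only:

	#include <fcntl.h>
	#include <stdio.h>

	#define FMODE_WRITE 0x2		/* assumed kernel value, illustration only */

	int main(void)
	{
		int modes[] = { O_RDONLY, O_WRONLY, O_RDWR };
		const char *names[] = { "O_RDONLY", "O_WRONLY", "O_RDWR" };
		int i;

		for (i = 0; i < 3; i++)
			printf("%-8s  old test: %d  new test: %d\n", names[i],
			       (modes[i] & FMODE_WRITE) != 0,
			       (modes[i] & O_ACCMODE) != O_RDONLY);
		return 0;
	}

The O_WRONLY row is the one the old test got wrong: a write-only open never broke an existing lease.
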
index 865282f..0741c69 100644 (file)
@@ -689,33 +689,20 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
        set_root(nd);
 
        while(1) {
-               struct vfsmount *parent;
                struct dentry *old = nd->path.dentry;
 
                if (nd->path.dentry == nd->root.dentry &&
                    nd->path.mnt == nd->root.mnt) {
                        break;
                }
-               spin_lock(&dcache_lock);
                if (nd->path.dentry != nd->path.mnt->mnt_root) {
-                       nd->path.dentry = dget(nd->path.dentry->d_parent);
-                       spin_unlock(&dcache_lock);
+                       /* rare case of legitimate dget_parent()... */
+                       nd->path.dentry = dget_parent(nd->path.dentry);
                        dput(old);
                        break;
                }
-               spin_unlock(&dcache_lock);
-               spin_lock(&vfsmount_lock);
-               parent = nd->path.mnt->mnt_parent;
-               if (parent == nd->path.mnt) {
-                       spin_unlock(&vfsmount_lock);
+               if (!follow_up(&nd->path))
                        break;
-               }
-               mntget(parent);
-               nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
-               spin_unlock(&vfsmount_lock);
-               dput(old);
-               mntput(nd->path.mnt);
-               nd->path.mnt = parent;
        }
        follow_mount(&nd->path);
 }
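
The follow_dotdot() rewrite above leans on two existing helpers: dget_parent(), which takes the child's d_lock itself so the caller no longer needs dcache_lock around the parent lookup, and follow_up(), which steps from a mount's root to its mountpoint in the parent mount. A one-line sketch with a hypothetical helper name:

	/* Hypothetical helper, not from the patch. */
	static struct dentry *pin_parent(struct dentry *child)
	{
		return dget_parent(child);	/* caller must dput() the result */
	}
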
@@ -823,6 +810,17 @@ fail:
 }
 
 /*
+ * This is a temporary kludge to deal with "automount" symlinks; proper
+ * solution is to trigger them on follow_mount(), so that do_lookup()
+ * would DTRT.  To be killed before 2.6.34-final.
+ */
+static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
+{
+       return inode && unlikely(inode->i_op->follow_link) &&
+               ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
+}
+
+/*
  * Name resolution.
  * This is the basic name resolution function, turning a pathname into
  * the final dentry. We expect 'base' to be positive and a directory.
@@ -942,8 +940,7 @@ last_component:
                if (err)
                        break;
                inode = next.dentry->d_inode;
-               if ((lookup_flags & LOOKUP_FOLLOW)
-                   && inode && inode->i_op->follow_link) {
+               if (follow_on_final(inode, lookup_flags)) {
                        err = do_follow_link(&next, nd);
                        if (err)
                                goto return_err;
@@ -1493,7 +1490,7 @@ int may_open(struct path *path, int acc_mode, int flag)
         * An append-only file must be opened in append mode for writing.
         */
        if (IS_APPEND(inode)) {
-               if  ((flag & FMODE_WRITE) && !(flag & O_APPEND))
+               if  ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
                        return -EPERM;
                if (flag & O_TRUNC)
                        return -EPERM;
@@ -1537,7 +1534,7 @@ static int handle_truncate(struct path *path)
  * what get passed to sys_open().
  */
 static int __open_namei_create(struct nameidata *nd, struct path *path,
-                               int flag, int mode)
+                               int open_flag, int mode)
 {
        int error;
        struct dentry *dir = nd->path.dentry;
@@ -1555,7 +1552,7 @@ out_unlock:
        if (error)
                return error;
        /* Don't check for write permission, don't truncate */
-       return may_open(&nd->path, 0, flag & ~O_TRUNC);
+       return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
 }
 
 /*
@@ -1726,7 +1723,7 @@ do_last:
                error = mnt_want_write(nd.path.mnt);
                if (error)
                        goto exit_mutex_unlock;
-               error = __open_namei_create(&nd, &path, flag, mode);
+               error = __open_namei_create(&nd, &path, open_flag, mode);
                if (error) {
                        mnt_drop_write(nd.path.mnt);
                        goto exit;
@@ -1788,7 +1785,7 @@ ok:
                if (error)
                        goto exit;
        }
-       error = may_open(&nd.path, acc_mode, flag);
+       error = may_open(&nd.path, acc_mode, open_flag);
        if (error) {
                if (will_truncate)
                        mnt_drop_write(nd.path.mnt);
@@ -2265,8 +2262,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
                error = -EBUSY;
        else {
                error = security_inode_unlink(dir, dentry);
-               if (!error)
+               if (!error) {
                        error = dir->i_op->unlink(dir, dentry);
+                       if (!error)
+                               dentry->d_inode->i_flags |= S_DEAD;
+               }
        }
        mutex_unlock(&dentry->d_inode->i_mutex);
 
@@ -2619,6 +2619,8 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
        else
                error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
        if (!error) {
+               if (target)
+                       target->i_flags |= S_DEAD;
                if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
                        d_move(old_dentry, new_dentry);
        }
index c768f73..8174c8a 100644 (file)
@@ -573,7 +573,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                        mnt->mnt_master = old;
                        CLEAR_MNT_SHARED(mnt);
                } else if (!(flag & CL_PRIVATE)) {
-                       if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+                       if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
                                list_add(&mnt->mnt_share, &old->mnt_share);
                        if (IS_MNT_SLAVE(old))
                                list_add(&mnt->mnt_slave, &old->mnt_slave);
@@ -737,6 +737,21 @@ static void m_stop(struct seq_file *m, void *v)
        up_read(&namespace_sem);
 }
 
+int mnt_had_events(struct proc_mounts *p)
+{
+       struct mnt_namespace *ns = p->ns;
+       int res = 0;
+
+       spin_lock(&vfsmount_lock);
+       if (p->event != ns->event) {
+               p->event = ns->event;
+               res = 1;
+       }
+       spin_unlock(&vfsmount_lock);
+
+       return res;
+}
+
 struct proc_fs_info {
        int flag;
        const char *str;
@@ -1121,8 +1136,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 {
        struct path path;
        int retval;
+       int lookup_flags = 0;
 
-       retval = user_path(name, &path);
+       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+               return -EINVAL;
+
+       if (!(flags & UMOUNT_NOFOLLOW))
+               lookup_flags |= LOOKUP_FOLLOW;
+
+       retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
        if (retval)
                goto out;
        retval = -EINVAL;
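
With the flag validation above, umount() gains UMOUNT_NOFOLLOW, which suppresses LOOKUP_FOLLOW on the final path component. A userspace sketch; the mount-point path is made up, and the fallback #define assumes the flag value introduced here is 0x00000008:

	#include <stdio.h>
	#include <sys/mount.h>

	#ifndef UMOUNT_NOFOLLOW
	#define UMOUNT_NOFOLLOW	0x00000008	/* assumed value, see linux/fs.h */
	#endif

	int main(void)
	{
		/* Refuse to follow a trailing symlink planted at the mount point. */
		if (umount2("/mnt/untrusted", UMOUNT_NOFOLLOW) == -1)
			perror("umount2");
		return 0;
	}
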
@@ -1246,6 +1268,21 @@ void drop_collected_mounts(struct vfsmount *mnt)
        release_mounts(&umount_list);
 }
 
+int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+                  struct vfsmount *root)
+{
+       struct vfsmount *mnt;
+       int res = f(root, arg);
+       if (res)
+               return res;
+       list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
+               res = f(mnt, arg);
+               if (res)
+                       return res;
+       }
+       return 0;
+}
+
 static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
 {
        struct vfsmount *p;
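
iterate_mounts(), added in the hunk above, invokes the callback on the root mount and then on every mount on root->mnt_list, stopping as soon as the callback returns non-zero. A minimal caller sketch with hypothetical names:

	/* Hypothetical example, not part of the patch: count mounts in a tree. */
	static int count_one(struct vfsmount *mnt, void *arg)
	{
		(*(unsigned int *)arg)++;
		return 0;		/* non-zero would stop the walk early */
	}

	static unsigned int count_mounts(struct vfsmount *root)
	{
		unsigned int n = 0;

		iterate_mounts(count_one, &n, root);
		return n;
	}
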
@@ -1538,7 +1575,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
                spin_lock(&vfsmount_lock);
-               mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK;
+               mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
                path->mnt->mnt_flags = mnt_flags;
                spin_unlock(&vfsmount_lock);
        }
@@ -1671,7 +1708,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
 {
        int err;
 
-       mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD);
+       mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
 
        down_write(&namespace_sem);
        /* Something was mounted here while we slept */
@@ -2314,17 +2351,13 @@ void __init mnt_init(void)
 
 void put_mnt_ns(struct mnt_namespace *ns)
 {
-       struct vfsmount *root;
        LIST_HEAD(umount_list);
 
-       if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
+       if (!atomic_dec_and_test(&ns->count))
                return;
-       root = ns->root;
-       ns->root = NULL;
-       spin_unlock(&vfsmount_lock);
        down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
-       umount_tree(root, 0, &umount_list);
+       umount_tree(ns->root, 0, &umount_list);
        spin_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
index e1d415e..0d28982 100644 (file)
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
                data->res.fattr = &data->fattr;
                data->res.eof = 0;
                data->res.count = bytes;
+               nfs_fattr_init(&data->fattr);
                msg.rpc_argp = &data->args;
                msg.rpc_resp = &data->res;
 
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
        data->res.count = 0;
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;
+       nfs_fattr_init(&data->fattr);
 
        NFS_PROTO(data->inode)->commit_setup(data, &msg);
 
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;
+               nfs_fattr_init(&data->fattr);
 
                task_setup_data.task = &data->task;
                task_setup_data.callback_data = data;
index fa58800..237874f 100644 (file)
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
  */
 int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 {
-       struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-       struct fscache_cookie *cookie = nfsi->fscache;
-
-       BUG_ON(!cookie);
-
        if (PageFsCache(page)) {
+               struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+               struct fscache_cookie *cookie = nfsi->fscache;
+
+               BUG_ON(!cookie);
                dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
                         cookie, page, nfsi);
 
index f141bde..7570573 100644 (file)
@@ -574,14 +574,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
        nfs_revalidate_inode(server, inode);
 }
 
-static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred)
+static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
 {
        struct nfs_open_context *ctx;
 
        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx != NULL) {
-               ctx->path.dentry = dget(dentry);
-               ctx->path.mnt = mntget(mnt);
+               ctx->path = *path;
+               path_get(&ctx->path);
                ctx->cred = get_rpccred(cred);
                ctx->state = NULL;
                ctx->lockowner = current->files;
@@ -686,7 +686,7 @@ int nfs_open(struct inode *inode, struct file *filp)
        cred = rpc_lookup_cred();
        if (IS_ERR(cred))
                return PTR_ERR(cred);
-       ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
+       ctx = alloc_nfs_open_context(&filp->f_path, cred);
        put_rpccred(cred);
        if (ctx == NULL)
                return -ENOMEM;
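
The alloc_nfs_open_context() change above follows the pattern used throughout this series: copy the whole struct path and pin both the vfsmount and the dentry with one path_get() instead of separate mntget()/dget() calls. A sketch with hypothetical names; path_put() later drops both references at once:

	/* Hypothetical helper: keep a long-lived copy of a caller's struct path. */
	static void example_pin_path(const struct path *src, struct path *kept)
	{
		*kept = *src;		/* copy the mnt + dentry pointers */
		path_get(kept);		/* one helper pins both */
	}
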
index 0adefc4..59047f8 100644 (file)
@@ -120,7 +120,7 @@ static struct {
        { .status = MNT3ERR_INVAL,              .errno = -EINVAL,       },
        { .status = MNT3ERR_NAMETOOLONG,        .errno = -ENAMETOOLONG, },
        { .status = MNT3ERR_NOTSUPP,            .errno = -ENOTSUPP,     },
-       { .status = MNT3ERR_SERVERFAULT,        .errno = -ESERVERFAULT, },
+       { .status = MNT3ERR_SERVERFAULT,        .errno = -EREMOTEIO,    },
 };
 
 struct mountres {
index 5e078b2..7bc2da8 100644 (file)
@@ -699,7 +699,7 @@ static struct {
        { NFSERR_BAD_COOKIE,    -EBADCOOKIE     },
        { NFSERR_NOTSUPP,       -ENOTSUPP       },
        { NFSERR_TOOSMALL,      -ETOOSMALL      },
-       { NFSERR_SERVERFAULT,   -ESERVERFAULT   },
+       { NFSERR_SERVERFAULT,   -EREMOTEIO      },
        { NFSERR_BADTYPE,       -EBADTYPE       },
        { NFSERR_JUKEBOX,       -EJUKEBOX       },
        { -1,                   -EIO            }
index 375f0fa..84d83be 100644 (file)
@@ -724,8 +724,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
        p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
        if (p->o_arg.seqid == NULL)
                goto err_free;
-       p->path.mnt = mntget(path->mnt);
-       p->path.dentry = dget(path->dentry);
+       path_get(path);
+       p->path = *path;
        p->dir = parent;
        p->owner = sp;
        atomic_inc(&sp->so_count);
@@ -1944,8 +1944,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
        calldata->res.seqid = calldata->arg.seqid;
        calldata->res.server = server;
        calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
-       calldata->path.mnt = mntget(path->mnt);
-       calldata->path.dentry = dget(path->dentry);
+       path_get(path);
+       calldata->path = *path;
 
        msg.rpc_argp = &calldata->arg,
        msg.rpc_resp = &calldata->res,
index e437fd6..5cd5184 100644 (file)
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
-       status = -ESERVERFAULT;
+       status = -EREMOTEIO;
 
        if (memcmp(id.data, res->sr_session->sess_id.data,
                   NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
        { NFS4ERR_BAD_COOKIE,   -EBADCOOKIE     },
        { NFS4ERR_NOTSUPP,      -ENOTSUPP       },
        { NFS4ERR_TOOSMALL,     -ETOOSMALL      },
-       { NFS4ERR_SERVERFAULT,  -ESERVERFAULT   },
+       { NFS4ERR_SERVERFAULT,  -EREMOTEIO      },
        { NFS4ERR_BADTYPE,      -EBADTYPE       },
        { NFS4ERR_LOCKED,       -EAGAIN         },
        { NFS4ERR_SYMLINK,      -ELOOP          },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
        }
        if (stat <= 10000 || stat > 10100) {
                /* The server is looney tunes. */
-               return -ESERVERFAULT;
+               return -EREMOTEIO;
        }
        /* If we cannot translate the error, the recovery routines should
         * handle it.
index 7b54b8b..d63d964 100644 (file)
@@ -1598,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
        struct nfs_page *req;
        int ret;
 
-       if (PageFsCache(page))
-               nfs_fscache_release_page(page, GFP_KERNEL);
+       nfs_fscache_release_page(page, GFP_KERNEL);
 
        req = nfs_find_and_lock_request(page);
        ret = PTR_ERR(req);
index d3854d9..bf9cbd2 100644 (file)
@@ -36,10 +36,9 @@ static struct file *do_open(char *name, int flags)
                return ERR_PTR(error);
 
        if (flags == O_RDWR)
-               error = may_open(&nd.path, MAY_READ|MAY_WRITE,
-                                          FMODE_READ|FMODE_WRITE);
+               error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags);
        else
-               error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE);
+               error = may_open(&nd.path, MAY_WRITE, flags);
 
        if (!error)
                return dentry_open(nd.path.dentry, nd.path.mnt, flags,
index c487810..a0c4016 100644 (file)
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
 
 static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
 {
-       struct svc_export *exp;
        u32 fsidv[2];
 
        mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
 
-       exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
-       /*
-        * We shouldn't have accepting an nfsv4 request at all if we
-        * don't have a pseudoexport!:
-        */
-       if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
-               exp = ERR_PTR(-ESERVERFAULT);
-       return exp;
+       return rqst_exp_find(rqstp, FSID_NUM, fsidv);
 }
 
 /*
index a8587e9..bbf72d8 100644 (file)
@@ -2121,9 +2121,15 @@ out_acl:
                 * and this is the root of a cross-mounted filesystem.
                 */
                if (ignore_crossmnt == 0 &&
-                   exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
-                       err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
-                               exp->ex_path.mnt->mnt_mountpoint, &stat);
+                   dentry == exp->ex_path.mnt->mnt_root) {
+                       struct path path = exp->ex_path;
+                       path_get(&path);
+                       while (follow_up(&path)) {
+                               if (path.dentry != path.mnt->mnt_root)
+                                       break;
+                       }
+                       err = vfs_getattr(path.mnt, path.dentry, &stat);
+                       path_put(&path);
                        if (err)
                                goto out_nfserr;
                }
index 97d79ef..15dc2de 100644 (file)
@@ -361,7 +361,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                 * If we are changing the size of the file, then
                 * we need to break all leases.
                 */
-               host_err = break_lease(inode, FMODE_WRITE | O_NONBLOCK);
+               host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
                if (host_err == -EWOULDBLOCK)
                        host_err = -ETIMEDOUT;
                if (host_err) /* ENOMEM or EWOULDBLOCK */
@@ -734,7 +734,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
         * Check to see if there are any leases on this file.
         * This may block while leases are broken.
         */
-       host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? FMODE_WRITE : 0));
+       host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
        if (host_err == -EWOULDBLOCK)
                host_err = -ETIMEDOUT;
        if (host_err) /* NOMEM or WOULDBLOCK */
@@ -752,7 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
                            flags, current_cred());
        if (IS_ERR(*filp))
                host_err = PTR_ERR(*filp);
-       host_err = ima_file_check(*filp, access);
+       else
+               host_err = ima_file_check(*filp, access);
 out_nfserr:
        err = nfserrno(host_err);
 out:
index 76d803e..0092840 100644 (file)
@@ -224,7 +224,7 @@ fail:
  * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
  */
 static int
-nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de)
+nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
 {
        if (len != de->name_len)
                return 0;
@@ -349,11 +349,11 @@ done:
  * Entry is guaranteed to be valid.
  */
 struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *dir, struct dentry *dentry,
+nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
                 struct page **res_page)
 {
-       const char *name = dentry->d_name.name;
-       int namelen = dentry->d_name.len;
+       const unsigned char *name = qstr->name;
+       int namelen = qstr->len;
        unsigned reclen = NILFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
        unsigned long npages = dir_pages(dir);
@@ -424,13 +424,13 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
        return de;
 }
 
-ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry)
+ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 {
        ino_t res = 0;
        struct nilfs_dir_entry *de;
        struct page *page;
 
-       de = nilfs_find_entry(dir, dentry, &page);
+       de = nilfs_find_entry(dir, qstr, &page);
        if (de) {
                res = le64_to_cpu(de->inode);
                kunmap(page);
@@ -465,7 +465,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
 int nilfs_add_link(struct dentry *dentry, struct inode *inode)
 {
        struct inode *dir = dentry->d_parent->d_inode;
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned chunk_size = nilfs_chunk_size(dir);
        unsigned reclen = NILFS_DIR_REC_LEN(namelen);
index 07ba838..ad6ed2c 100644 (file)
@@ -67,7 +67,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
        if (dentry->d_name.len > NILFS_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       ino = nilfs_inode_by_name(dir, dentry);
+       ino = nilfs_inode_by_name(dir, &dentry->d_name);
        inode = NULL;
        if (ino) {
                inode = nilfs_iget(dir->i_sb, ino);
@@ -81,10 +81,7 @@ struct dentry *nilfs_get_parent(struct dentry *child)
 {
        unsigned long ino;
        struct inode *inode;
-       struct dentry dotdot;
-
-       dotdot.d_name.name = "..";
-       dotdot.d_name.len = 2;
+       struct qstr dotdot = {.name = "..", .len = 2};
 
        ino = nilfs_inode_by_name(child->d_inode, &dotdot);
        if (!ino)
@@ -296,7 +293,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
        int err;
 
        err = -ENOENT;
-       de = nilfs_find_entry(dir, dentry, &page);
+       de = nilfs_find_entry(dir, &dentry->d_name, &page);
        if (!de)
                goto out;
 
@@ -389,7 +386,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                return err;
 
        err = -ENOENT;
-       old_de = nilfs_find_entry(old_dir, old_dentry, &old_page);
+       old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
        if (!old_de)
                goto out;
 
@@ -409,7 +406,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_dir;
 
                err = -ENOENT;
-               new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
+               new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
                inc_nlink(old_inode);
index 4da6f67..8723e5b 100644 (file)
@@ -217,10 +217,10 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
 
 /* dir.c */
 extern int nilfs_add_link(struct dentry *, struct inode *);
-extern ino_t nilfs_inode_by_name(struct inode *, struct dentry *);
+extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
 extern int nilfs_make_empty(struct inode *, struct inode *);
 extern struct nilfs_dir_entry *
-nilfs_find_entry(struct inode *, struct dentry *, struct page **);
+nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
 extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
 extern int nilfs_empty_dir(struct inode *);
 extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
index 3dae4a1..7e9df11 100644 (file)
@@ -599,7 +599,7 @@ bail:
        return ret;
 }
 
-/* 
+/*
  * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
  * particularly interested in the aio/dio case.  Like the core uses
  * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
 
        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
-                                           nr_segs, 
+                                           nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);
 
index d43d34a..21c808f 100644 (file)
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
        }
        ocfs2_metadata_cache_io_unlock(ci);
 
-       mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 
+       mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
             (unsigned long long)block, nr,
             ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
             flags);
index eda5b8b..5c98900 100644 (file)
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
 
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 
-/* Only sets a new threshold if there are no active regions. 
+/* Only sets a new threshold if there are no active regions.
  *
  * No locking or otherwise interesting code is required for reading
  * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
 
        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
-            jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 
+            jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
        o2quo_disk_timeout();
 }
 
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
             "seq %llu last %llu changed %u equal %u\n",
             slot->ds_node_num, (long long)slot->ds_last_generation,
             le32_to_cpu(hb_block->hb_cksum),
-            (unsigned long long)le64_to_cpu(hb_block->hb_seq), 
+            (unsigned long long)le64_to_cpu(hb_block->hb_seq),
             (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
             slot->ds_equal_samples);
 
index 334f231..d8d0c65 100644 (file)
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        }
 
        if (was_valid && !valid) {
-               printk(KERN_INFO "o2net: no longer connected to "
+               printk(KERN_NOTICE "o2net: no longer connected to "
                       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
                o2net_complete_nodes_nsw(nn);
        }
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        if (!was_valid && valid) {
                o2quo_conn_up(o2net_num_from_nn(nn));
                cancel_delayed_work(&nn->nn_connect_expired);
-               printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+               printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
                       o2nm_this_node() > sc->sc_node->nd_num ?
                                "connected to" : "accepted connection from",
                       SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
                        cond_resched();
                        continue;
                }
-               mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 
+               mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
                     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
                o2net_ensure_shutdown(nn, sc, 0);
                break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
 
        do_gettimeofday(&now);
 
-       printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+       printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
             "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
                     o2net_idle_timeout() / 1000,
                     o2net_idle_timeout() % 1000);
        mlog(ML_NOTICE, "here are some times that might help debug the "
             "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
             "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-            sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 
+            sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
             now.tv_sec, (long) now.tv_usec,
             sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
             sc->sc_tv_advance_start.tv_sec,
index 8d58cfe..96fa7eb 100644 (file)
  * on their number */
 #define O2NET_QUORUM_DELAY_MS  ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
 
-/* 
+/*
  * This version number represents quite a lot, unfortunately.  It not
  * only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol.  It should 
+ * locking semantics of the file system using the protocol.  It should
  * be somewhere else, I'm sure, but right now it isn't.
  *
  * With version 11, we separate out the filesystem locking portion.  The
index b5786a7..3cfa114 100644 (file)
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
                mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
 } while (0)
 
-#define DLM_LKSB_UNUSED1           0x01  
+#define DLM_LKSB_UNUSED1           0x01
 #define DLM_LKSB_PUT_LVB           0x02
 #define DLM_LKSB_GET_LVB           0x04
 #define DLM_LKSB_UNUSED2           0x08
index 01cf8cc..dccc439 100644 (file)
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
                dlm_lock_put(lock);
                /* free up the reserved bast that we are cancelling.
                 * guaranteed that this will not be the last reserved
-                * ast because *both* an ast and a bast were reserved 
+                * ast because *both* an ast and a bast were reserved
                 * to get to this point.  the res->spinlock will not be
                 * taken here */
                dlm_lockres_release_ast(dlm, res);
index ca96bce..f283bce 100644 (file)
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
                        /* instead of logging the same network error over
                         * and over, sleep here and wait for the heartbeat
                         * to notice the node is dead.  times out after 5s. */
-                       dlm_wait_for_node_death(dlm, res->owner, 
+                       dlm_wait_for_node_death(dlm, res->owner,
                                                DLM_NODE_DEATH_WAIT_MAX);
                        ret = DLM_RECOVERING;
                        mlog(0, "node %u died so returning DLM_RECOVERING "
index 42b0bad..0cd24cf 100644 (file)
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
        assert_spin_locked(&res->spinlock);
 
        stringify_lockname(res->lockname.name, res->lockname.len,
-                          buf, sizeof(buf) - 1);
+                          buf, sizeof(buf));
        printk("lockres: %s, owner=%u, state=%u\n",
               buf, res->owner, res->state);
        printk("  last used: %lu, refcnt: %u, on purge list: %s\n",
index 0334000..988c905 100644 (file)
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
        }
 
        /* Once the dlm ctxt is marked as leaving then we don't want
-        * to be put in someone's domain map. 
+        * to be put in someone's domain map.
         * Also, explicitly disallow joining at certain troublesome
         * times (ie. during recovery). */
        if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
index 437698e..7333377 100644 (file)
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
                }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
-       } else if (dlm_is_recovery_lock(res->lockname.name, 
+       } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
                 * there will never be an AST delivered to put
index 03ccf9a..a659606 100644 (file)
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
        struct dlm_master_list_entry *mle;
 
        assert_spin_locked(&dlm->spinlock);
-       
+
        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
                __dlm_insert_mle(dlm, mle);
 
                /* still holding the dlm spinlock, check the recovery map
-                * to see if there are any nodes that still need to be 
+                * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
                                msleep(500);
                        }
                        continue;
-               } 
+               }
 
                dlm_kick_recovery_thread(dlm);
                msleep(1000);
@@ -939,8 +939,8 @@ wait:
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
-                            "dlm_wait_for_lock_mastery, blocked=%d\n", 
-                            dlm->name, res->lockname.len, 
+                            "dlm_wait_for_lock_mastery, blocked=%d\n",
+                            dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
-                       mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
+                       mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
                }
                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
                             dlm->node_num, res->lockname.len, res->lockname.name);
-               ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 
+               ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
                if (ret < 0) {
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
 
                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
                        mlog(0, "%.*s: node %u create mles on other "
-                            "nodes and requests a re-assert\n", 
+                            "nodes and requests a re-assert\n",
                             namelen, lockname, to);
                        reassert = 1;
                }
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                                spin_unlock(&dlm->master_lock);
                                spin_unlock(&dlm->spinlock);
                                goto done;
-                       }       
+                       }
                }
        }
        spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
                int extra_ref = 0;
                int nn = -1;
                int rr, err = 0;
-               
+
                spin_lock(&mle->spinlock);
                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
                        extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
                        /* MASTER mle: if any bits set in the response map
                         * then the calling node needs to re-assert to clear
                         * up nodes that this node contacted */
-                       while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 
+                       while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
                                                    nn+1)) < O2NM_MAX_NODES) {
                                if (nn != dlm->node_num && nn != assert->node_idx)
                                        master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
        __dlm_print_one_lock_resource(res);
        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
-       *ret_data = (void *)res; 
+       *ret_data = (void *)res;
        dlm_put(dlm);
        return -EINVAL;
 }
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
        item->u.am.request_from = request_from;
        item->u.am.flags = flags;
 
-       if (ignore_higher) 
-               mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 
+       if (ignore_higher)
+               mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
                     res->lockname.name);
-               
+
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
  * think that $RECOVERY is currently mastered by a dead node.  If so,
  * we wait a short time to allow that node to get notified by its own
  * heartbeat stack, then check again.  All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is 
+ * mastered by dead nodes are purged when the hearbeat callback is
  * fired, so we can know for sure that it is safe to continue once
  * the node returns a live node or no node.  */
 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                ret = -EAGAIN;
                        }
                        spin_unlock(&dlm->spinlock);
-                       mlog(0, "%s: reco lock master is %u\n", dlm->name, 
+                       mlog(0, "%s: reco lock master is %u\n", dlm->name,
                             master);
                        break;
                }
@@ -2602,7 +2602,7 @@ fail:
 
                        mlog(0, "%s:%.*s: timed out during migration\n",
                             dlm->name, res->lockname.len, res->lockname.name);
-                       /* avoid hang during shutdown when migrating lockres 
+                       /* avoid hang during shutdown when migrating lockres
                         * to a node which also goes down */
                        if (dlm_is_node_dead(dlm, target)) {
                                mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
        can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
        spin_unlock(&res->spinlock);
 
-       /* target has died, so make the caller break out of the 
+       /* target has died, so make the caller break out of the
         * wait_event, but caller must recheck the domain_map */
        spin_lock(&dlm->spinlock);
        if (!test_bit(mig_target, dlm->domain_map))
index 2f9e4e1..344bcf9 100644 (file)
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
-                                            "node %u (%s)!\n", 
+                                            "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
        mres->master = master;
 }
 
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+                                         struct dlm_migratable_lockres *mres,
+                                         int queue)
+{
+       if (!lock->lksb)
+              return;
+
+       /* Ignore lvb in all locks in the blocked list */
+       if (queue == DLM_BLOCKED_LIST)
+               return;
+
+       /* Only consider lvbs in locks with granted EX or PR lock levels */
+       if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+               return;
+
+       if (dlm_lvb_is_empty(mres->lvb)) {
+               memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+               return;
+       }
+
+       /* Ensure the lvb copied for migration matches in other valid locks */
+       if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+               return;
+
+       mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+            "node=%u\n",
+            dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+            dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+            lock->lockres->lockname.len, lock->lockres->lockname.name,
+            lock->ml.node);
+       dlm_print_one_lock_resource(lock->lockres);
+       BUG();
+}
 
 /* returns 1 if this lock fills the network structure,
  * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
-               /* send our current lvb */
-               if (ml->type == LKM_EXMODE ||
-                   ml->type == LKM_PRMODE) {
-                       /* if it is already set, this had better be a PR
-                        * and it has to match */
-                       if (!dlm_lvb_is_empty(mres->lvb) &&
-                           (ml->type == LKM_EXMODE ||
-                            memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
-                               mlog(ML_ERROR, "mismatched lvbs!\n");
-                               dlm_print_one_lock_resource(lock->lockres);
-                               BUG();
-                       }
-                       memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
-               }
+               dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
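
The two hunks above factor the lvb sanity checks out of dlm_add_lock_to_array() into the new dlm_prepare_lvb_for_migration() helper: blocked-list locks are skipped, only granted EX/PR locks carry an lvb, the first valid lvb is copied into mres, and any later lock whose lvb disagrees is fatal. A small user-space model of that policy follows; LVB_LEN and all names here are illustrative, not the kernel's.

#include <stdio.h>
#include <string.h>

#define LVB_LEN 64

static int lvb_is_empty(const unsigned char *lvb)
{
	int i;

	for (i = 0; i < LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

/* Returns 0 on success, -1 on a mismatch (where the kernel BUG()s). */
static int prepare_lvb(unsigned char *mres_lvb, const unsigned char *lock_lvb)
{
	if (lvb_is_empty(mres_lvb)) {
		memcpy(mres_lvb, lock_lvb, LVB_LEN);
		return 0;
	}
	return memcmp(mres_lvb, lock_lvb, LVB_LEN) ? -1 : 0;
}

int main(void)
{
	unsigned char mres[LVB_LEN] = {0}, a[LVB_LEN] = {1}, b[LVB_LEN] = {2};

	printf("first copy: %d\n", prepare_lvb(mres, a));	/* 0  */
	printf("same lvb:   %d\n", prepare_lvb(mres, a));	/* 0  */
	printf("mismatch:   %d\n", prepare_lvb(mres, b));	/* -1 */
	return 0;
}

Compiled stand-alone, the third call reports the mismatch that the real code would escalate to a BUG().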
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
        struct dlm_lock *lock = NULL;
        u8 from = O2NM_MAX_NODES;
        unsigned int added = 0;
+       __be64 c;
 
        mlog(0, "running %d locks for this lockres\n", mres->num_locks);
        for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                        /* lock is always created locally first, and
                         * destroyed locally last.  it must be on the list */
                        if (!lock) {
-                               __be64 c = ml->cookie;
-                               mlog(ML_ERROR, "could not find local lock "
-                                              "with cookie %u:%llu!\n",
+                               c = ml->cookie;
+                               mlog(ML_ERROR, "Could not find local lock "
+                                              "with cookie %u:%llu, node %u, "
+                                              "list %u, flags 0x%x, type %d, "
+                                              "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
-                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    ml->node, ml->list, ml->flags, ml->type,
+                                    ml->convert_type, ml->highest_blocked);
+                               __dlm_print_one_lock_resource(res);
+                               BUG();
+                       }
+
+                       if (lock->ml.node != ml->node) {
+                               c = lock->ml.cookie;
+                               mlog(ML_ERROR, "Mismatched node# in lock "
+                                    "cookie %u:%llu, name %.*s, node %u\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    res->lockname.len, res->lockname.name,
+                                    lock->ml.node);
+                               c = ml->cookie;
+                               mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+                                    "node %u, list %u, flags 0x%x, type %d, "
+                                    "conv %d, highest blocked %d\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    ml->node, ml->list, ml->flags, ml->type,
+                                    ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }
-                       BUG_ON(lock->ml.node != ml->node);
 
                        if (tmpq != queue) {
-                               mlog(0, "lock was on %u instead of %u for %.*s\n",
-                                    j, ml->list, res->lockname.len, res->lockname.name);
+                               c = ml->cookie;
+                               mlog(0, "Lock cookie %u:%llu was on list %u "
+                                    "instead of list %u for %.*s\n",
+                                    dlm_get_lock_cookie_node(be64_to_cpu(c)),
+                                    dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+                                    j, ml->list, res->lockname.len,
+                                    res->lockname.name);
+                               __dlm_print_one_lock_resource(res);
                                spin_unlock(&res->spinlock);
                                continue;
                        }
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                 * the lvb. */
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        } else {
-                               /* otherwise, the node is sending its 
+                               /* otherwise, the node is sending its
                                 * most recent valid lvb info */
                                BUG_ON(ml->type != LKM_EXMODE &&
                                       ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
                spin_lock(&res->spinlock);
                list_for_each_entry(lock, queue, list) {
                        if (lock->ml.cookie == ml->cookie) {
-                               __be64 c = lock->ml.cookie;
+                               c = lock->ml.cookie;
                                mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
                                     "exists on this lockres!\n", dlm->name,
                                     res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
        assert_spin_locked(&res->spinlock);
 
        if (res->owner == dlm->node_num)
-               /* if this node owned the lockres, and if the dead node 
+               /* if this node owned the lockres, and if the dead node
                 * had an EX when he died, blank out the lvb */
                search_node = dead_node;
        else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
        /* this node is the lockres master:
         * 1) remove any stale locks for the dead node
-        * 2) if the dead node had an EX when he died, blank out the lvb 
+        * 2) if the dead node had an EX when he died, blank out the lvb
         */
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
                     "dropping ref from lockres\n", dlm->name,
                     res->lockname.len, res->lockname.name, freed, dead_node);
-               BUG_ON(!test_bit(dead_node, res->refmap));
+               if (!test_bit(dead_node, res->refmap)) {
+                       mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+                            "but ref was not set\n", dlm->name,
+                            res->lockname.len, res->lockname.name, freed, dead_node);
+                       __dlm_print_one_lock_resource(res);
+               }
                dlm_lockres_clear_refmap_bit(dead_node, res);
        } else if (test_bit(dead_node, res->refmap)) {
                mlog(0, "%s:%.*s: dead node %u had a ref, but had "
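
The hunk above downgrades a BUG_ON() to an error message plus a lock resource dump when the dead node's refmap bit is unexpectedly clear; the bit is cleared afterwards either way. A toy restatement of that policy change, with invented names, a single-word bitmap, and the assumption that dead_node is below the word size:

#include <stdio.h>

static void toy_clear_dead_ref(unsigned long *refmap, unsigned int dead_node)
{
	if (!(*refmap & (1UL << dead_node)))
		fprintf(stderr, "dead node %u had no ref set\n", dead_node);

	*refmap &= ~(1UL << dead_node);	/* clear it regardless */
}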
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                                }
                                spin_unlock(&res->spinlock);
                                continue;
-                       }                       
+                       }
                        spin_lock(&res->spinlock);
                        /* zero the lvb if necessary */
                        dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
  * this function on each node racing to become the recovery
  * master will not stop attempting this until either:
  * a) this node gets the EX (and becomes the recovery master),
- * or b) dlm->reco.new_master gets set to some nodenum 
+ * or b) dlm->reco.new_master gets set to some nodenum
  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
  * so each time a recovery master is needed, the entire cluster
  * will sync at this point.  if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
 
        mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
             dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
-again: 
+again:
        memset(&lksb, 0, sizeof(lksb));
 
        ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
        if (ret == DLM_NORMAL) {
                mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
                     dlm->name, dlm->node_num);
-               
-               /* got the EX lock.  check to see if another node 
+
+               /* got the EX lock.  check to see if another node
                 * just became the reco master */
                if (dlm_reco_master_ready(dlm)) {
                        mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
                        /* see if recovery was already finished elsewhere */
                        spin_lock(&dlm->spinlock);
                        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
-                               status = -EINVAL;       
+                               status = -EINVAL;
                                mlog(0, "%s: got reco EX lock, but "
                                     "node got recovered already\n", dlm->name);
                                if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
                                        mlog(ML_ERROR, "%s: new master is %u "
-                                            "but no dead node!\n", 
+                                            "but no dead node!\n",
                                             dlm->name, dlm->reco.new_master);
                                        BUG();
                                }
@@ -2468,7 +2523,7 @@ again:
                 * set the master and send the messages to begin recovery */
                if (!status) {
                        mlog(0, "%s: dead=%u, this=%u, sending "
-                            "begin_reco now\n", dlm->name, 
+                            "begin_reco now\n", dlm->name,
                             dlm->reco.dead_node, dlm->node_num);
                        status = dlm_send_begin_reco_message(dlm,
                                      dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
                mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
                     dlm->name, dlm->node_num);
                /* another node is master. wait on
-                * reco.new_master != O2NM_INVALID_NODE_NUM 
+                * reco.new_master != O2NM_INVALID_NODE_NUM
                 * for at most one second */
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                         dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
                             "begin reco msg (%d)\n", dlm->name, nodenum, ret);
                        ret = 0;
                }
-               if (ret == -EAGAIN) {
+
+               /*
+                * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+                * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+                * We are handling both for compatibility reasons.
+                */
+               if (ret == -EAGAIN || ret == EAGAIN) {
                        mlog(0, "%s: trying to start recovery of node "
                             "%u, but node %u is waiting for last recovery "
                             "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
                }
                if (ret < 0) {
                        struct dlm_lock_resource *res;
-                       /* this is now a serious problem, possibly ENOMEM 
+                       /* this is now a serious problem, possibly ENOMEM
                         * in the network stack.  must retry */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
                        } else {
                                mlog(ML_ERROR, "recovery lock not found\n");
                        }
-                       /* sleep for a bit in hopes that we can avoid 
+                       /* sleep for a bit in hopes that we can avoid
                         * another ENOMEM */
                        msleep(100);
                        goto retry;
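
The backoff check a few lines above now accepts both -EAGAIN and the positive EAGAIN that nodes running a pre-aad1b153 dlm put on the wire, as the added comment notes. A minimal restatement of that test, with EAGAIN taken from <errno.h>:

#include <errno.h>
#include <stdbool.h>

/* A peer asking us to back off may report either sign, depending on
 * which kernel it runs; treat both the same way. */
static bool peer_wants_backoff(int ret)
{
	return ret == -EAGAIN || ret == EAGAIN;
}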
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        }
        if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
                mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
-                    "node %u changing it to %u\n", dlm->name, 
+                    "node %u changing it to %u\n", dlm->name,
                     dlm->reco.dead_node, br->node_idx, br->dead_node);
        }
        dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
                if (ret < 0) {
                        mlog_errno(ret);
                        if (dlm_is_host_down(ret)) {
-                               /* this has no effect on this recovery 
-                                * session, so set the status to zero to 
+                               /* this has no effect on this recovery
+                                * session, so set the status to zero to
                                 * finish out the last recovery */
                                mlog(ML_ERROR, "node %u went down after this "
                                     "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
        mlog(0, "%s: node %u finalizing recovery stage%d of "
             "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
             fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
+
        spin_lock(&dlm->spinlock);
 
        if (dlm->reco.new_master != fr->node_idx) {
index 00f53b2..49e29ec 100644 (file)
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                        actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                                     DLM_UNLOCK_REGRANT_LOCK|
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
-               } else if (status == DLM_RECOVERING || 
-                          status == DLM_MIGRATING || 
+               } else if (status == DLM_RECOVERING ||
+                          status == DLM_MIGRATING ||
                           status == DLM_FORWARD) {
                        /* must clear the actions because this unlock
                         * is about to be retried.  cannot free or do
@@ -661,14 +661,14 @@ retry:
        if (call_ast) {
                mlog(0, "calling unlockast(%p, %d)\n", data, status);
                if (is_master) {
-                       /* it is possible that there is one last bast 
+                       /* it is possible that there is one last bast
                         * pending.  make sure it is flushed, then
                         * call the unlockast.
                         * not an issue if this is a mastered remotely,
                         * since this lock has been removed from the
                         * lockres queues and cannot be found. */
                        dlm_kick_thread(dlm, NULL);
-                       wait_event(dlm->ast_wq, 
+                       wait_event(dlm->ast_wq,
                                   dlm_lock_basts_flushed(dlm, lock));
                }
                (*unlockast)(data, status);
index c5e4a49..e044019 100644 (file)
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
                lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
 
        lockres->l_level = lockres->l_requested;
+
+       /*
+        * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+        * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+        * downconverting the lock before the upconvert has fully completed.
+        */
+       lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 
        mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
 
        assert_spin_locked(&lockres->l_lock);
 
-       lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
        if (level > lockres->l_blocking) {
                /* only schedule a downconvert if we haven't already scheduled
                 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
                lockres->l_blocking = level;
        }
 
+       if (needs_downconvert)
+               lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
        mlog_exit(needs_downconvert);
        return needs_downconvert;
 }
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
        mlog_entry_void();
        spin_lock_irqsave(&lockres->l_lock, flags);
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+       lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
        if (convert)
                lockres->l_action = OCFS2_AST_INVALID;
        else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 again:
        wait = 0;
 
+       spin_lock_irqsave(&lockres->l_lock, flags);
+
        if (catch_signals && signal_pending(current)) {
                ret = -ERESTARTSYS;
-               goto out;
+               goto unlock;
        }
 
-       spin_lock_irqsave(&lockres->l_lock, flags);
-
        mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
                        "Cluster lock called on freeing lockres %s! flags "
                        "0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
                goto unlock;
        }
 
+       if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+               /*
+                * We've upconverted. If the lock now has a level we can
+                * work with, we take it. If, however, the lock is not at the
+                * required level, we go thru the full cycle. One way this could
+                * happen is if a process requesting an upconvert to PR is
+                * closely followed by another requesting upconvert to an EX.
+                * If the process requesting EX lands here, we want it to
+                * continue attempting to upconvert and let the process
+                * requesting PR take the lock.
+                * If multiple processes request upconvert to PR, the first one
+                * here will take the lock. The others will have to go thru the
+                * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+                * downconvert request.
+                */
+               if (level <= lockres->l_level)
+                       goto update_holders;
+       }
+
        if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
            !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
                /* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
                goto again;
        }
 
+update_holders:
        /* Ok, if we get here then we're good to go. */
        ocfs2_inc_holders(lockres, level);
 
        ret = 0;
 unlock:
+       lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
        spin_unlock_irqrestore(&lockres->l_lock, flags);
 out:
        /*
@@ -3155,7 +3187,7 @@ out:
 /* Mark the lockres as being dropped. It will no longer be
  * queued if blocking, but we still may have to wait on it
  * being dequeued from the downconvert thread before we can consider
- * it safe to drop. 
+ * it safe to drop.
  *
  * You can *not* attempt to call cluster_lock on this lockres anymore. */
 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
        unsigned long flags;
        int blocking;
        int new_level;
+       int level;
        int ret = 0;
        int set_lvb = 0;
        unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
 
        spin_lock_irqsave(&lockres->l_lock, flags);
 
-       BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
 recheck:
+       /*
+        * Is it still blocking? If not, we have no more work to do.
+        */
+       if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+               BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+               spin_unlock_irqrestore(&lockres->l_lock, flags);
+               ret = 0;
+               goto leave;
+       }
+
        if (lockres->l_flags & OCFS2_LOCK_BUSY) {
                /* XXX
                 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
                goto leave;
        }
 
+       /*
+        * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+        * set when the ast is received for an upconvert just before the
+        * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+        * on the heels of the ast, we want to delay the downconvert just
+        * enough to allow the up requestor to do its task. Because this
+        * lock is in the blocked queue, the lock will be downconverted
+        * as soon as the requestor is done with the lock.
+        */
+       if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+               goto leave_requeue;
+
+       /*
+        * How can we block and yet be at NL?  We were trying to upconvert
+        * from NL and got canceled.  The code comes back here, and now
+        * we notice and clear BLOCKING.
+        */
+       if (lockres->l_level == DLM_LOCK_NL) {
+               BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+               lockres->l_blocking = DLM_LOCK_NL;
+               lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+               spin_unlock_irqrestore(&lockres->l_lock, flags);
+               goto leave;
+       }
+
        /* if we're blocking an exclusive and we have *any* holders,
         * then requeue. */
        if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
         * may sleep, so we save off a copy of what we're blocking as
         * it may change while we're not holding the spin lock. */
        blocking = lockres->l_blocking;
+       level = lockres->l_level;
        spin_unlock_irqrestore(&lockres->l_lock, flags);
 
        ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
                goto leave;
 
        spin_lock_irqsave(&lockres->l_lock, flags);
-       if (blocking != lockres->l_blocking) {
+       if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
                /* If this changed underneath us, then we can't drop
                 * it just yet. */
                goto recheck;
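
Taken together, the dlmglue hunks above introduce OCFS2_LOCK_UPCONVERT_FINISHING: the convert AST raises it just before clearing OCFS2_LOCK_BUSY, __ocfs2_cluster_lock() lets a waiter whose requested level is already satisfied jump to the new update_holders: label, and ocfs2_unblock_lock() requeues rather than downconverting underneath the upconvert requestor. A compilable toy model of the flag ordering, with all names invented and queues omitted:

enum {
	TOY_LOCK_BUSY                = 1 << 0,
	TOY_LOCK_BLOCKED             = 1 << 1,
	TOY_LOCK_UPCONVERT_FINISHING = 1 << 2,
};

struct toy_lockres {
	unsigned int flags;
	int level;		/* currently granted level */
	int requested;		/* level the upconvert asked for */
};

/* AST for a completed upconvert: mark the upconvert as finishing
 * before dropping BUSY, mirroring the ordering added above. */
static void toy_handle_convert_ast(struct toy_lockres *l)
{
	l->level = l->requested;
	l->flags |= TOY_LOCK_UPCONVERT_FINISHING;
	l->flags &= ~TOY_LOCK_BUSY;
}

/* Downconvert thread: while the upconvert requestor is finishing,
 * requeue instead of acting on a freshly arrived BAST. */
static int toy_downconvert_should_requeue(const struct toy_lockres *l)
{
	return (l->flags & TOY_LOCK_UPCONVERT_FINISHING) != 0;
}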
index 15713cb..19ad145 100644 (file)
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
                     (unsigned long long)blkno, generation);
        }
-       
+
        *max_len = len;
 
 bail:
index d35a27f..5328529 100644 (file)
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
                emi->ei_clusters += ins->ei_clusters;
                return 1;
        } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
-                  (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+                  (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
                   ins->ei_flags == emi->ei_flags) {
                emi->ei_phys = ins->ei_phys;
                emi->ei_cpos = ins->ei_cpos;
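
The one-line fix above makes the merge-to-the-left test compare the inserted extent's logical end against emi->ei_cpos rather than emi->ei_phys, so an extent only extends the cached record when it abuts it in both the logical and physical address spaces. A stand-alone sketch of the corrected predicate; toy_extent is an invented stand-in for the ocfs2 extent map fields:

struct toy_extent {
	unsigned int cpos;	/* logical start cluster  */
	unsigned int phys;	/* physical start cluster */
	unsigned int clusters;	/* length in clusters     */
	unsigned int flags;
};

/* "ins" ends exactly where "emi" begins, logically and physically,
 * with matching flags, so the two ranges describe one contiguous run. */
static int merges_to_the_left(const struct toy_extent *emi,
			      const struct toy_extent *ins)
{
	return (ins->phys + ins->clusters) == emi->phys &&
	       (ins->cpos + ins->clusters) == emi->cpos &&
	       ins->flags == emi->flags;
}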
index 06ccf6a..558ce03 100644 (file)
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
        int ret;
 
        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we 
+       /* ugh.  in prepare/commit_write, if from==to==start of block, we
        ** skip the prepare.  make sure we never send an offset for the start
        ** of a block
        */
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
        struct inode *inode = dentry->d_inode;
        loff_t saved_pos, end;
 
-       /* 
+       /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */
@@ -2013,8 +2013,8 @@ out_dio:
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
-       if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
-           (file->f_flags & O_DIRECT && has_refcount)) {
+       if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+           ((file->f_flags & O_DIRECT) && has_refcount)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
                                                      pos + count - 1);
        }
 
-       /* 
+       /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
         * function pointer which is called when o_direct io completes so that
         * it can unlock our rw lock.  (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                goto bail;
        }
 
-       /* 
+       /*
         * buffered reads protect themselves in ->readpage().  O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         * We're fine letting folks race truncates and extending
         * writes with read across the cluster, just like they can
         * locally. Hence no rw_lock during read.
-        * 
+        *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
-        * generic_file_aio_read() a chance of actually working. 
+        * generic_file_aio_read() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 bail:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
-       if (rw_level != -1) 
+       if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
        mlog_exit(ret);
 
index 0297fb8..88459bd 100644 (file)
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
        if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
                status = ocfs2_try_open_lock(inode, 0);
                if (status) {
-                       make_bad_inode(inode);  
+                       make_bad_inode(inode);
                        return status;
                }
        }
@@ -684,7 +684,7 @@ bail:
        return status;
 }
 
-/* 
+/*
  * Serialize with orphan dir recovery. If the process doing
  * recovery on this orphan dir does an iget() with the dir
  * i_mutex held, we'll deadlock here. Instead we detect this
index 31fbb06..7d9d9c1 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/compat.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_COMPAT
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
+       bool preserve;
+       struct reflink_arguments args;
+       struct inode *inode = file->f_path.dentry->d_inode;
+
        switch (cmd) {
        case OCFS2_IOC32_GETFLAGS:
                cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case OCFS2_IOC_GROUP_EXTEND:
        case OCFS2_IOC_GROUP_ADD:
        case OCFS2_IOC_GROUP_ADD64:
-       case OCFS2_IOC_REFLINK:
                break;
+       case OCFS2_IOC_REFLINK:
+               if (copy_from_user(&args, (struct reflink_arguments *)arg,
+                                  sizeof(args)))
+                       return -EFAULT;
+               preserve = (args.preserve != 0);
+
+               return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+                                          compat_ptr(args.new_path), preserve);
        default:
                return -ENOIOCTLCMD;
        }
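
OCFS2_IOC_REFLINK can no longer be passed straight through, because 32-bit callers embed 32-bit pointer values in struct reflink_arguments; the handler above copies the argument block and widens each path pointer with compat_ptr() before calling ocfs2_reflink_ioctl(). A schematic kernel-style sketch of that pattern, assuming <linux/compat.h> and <linux/uaccess.h>; toy_args, toy_compat_ioctl and toy_native_op are invented names rather than ocfs2 symbols:

#ifdef CONFIG_COMPAT
struct toy_args {
	__u64 old_path;		/* holds a 32-bit user pointer value */
	__u64 new_path;
	__u64 preserve;
};

static long toy_native_op(struct file *file, const void __user *old_path,
			  const void __user *new_path, int preserve);

static long toy_compat_ioctl(struct file *file, unsigned long arg)
{
	struct toy_args args;

	if (copy_from_user(&args, compat_ptr(arg), sizeof(args)))
		return -EFAULT;

	/* Widen the embedded pointers before handing off to native code. */
	return toy_native_op(file, compat_ptr(args.old_path),
			     compat_ptr(args.new_path), args.preserve != 0);
}
#endif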
index bf34c49..9336c60 100644 (file)
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                status = -ENOENT;
                mlog_errno(status);
                return status;
-       }       
+       }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
        status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
index 9362eea..740f448 100644 (file)
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
 #define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
                                                 call to dlm_lock.  Only
                                                 exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+                                                    * from downconverting
+                                                    * before the upconvert
+                                                    * has completed */
 
 struct ocfs2_lock_res_ops;
 
index 1a1a679..7638a38 100644 (file)
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
        return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
 }
 
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+                                                  struct ocfs2_dinode *di)
 {
-       return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+       if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+               return blocksize -
+                       offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+                       di->i_xattr_inline_size;
+       else
+               return blocksize -
+                       offsetof(struct ocfs2_dinode, id2.i_data.id_data);
 }
 
 static inline int ocfs2_extent_recs_per_inode(int blocksize)
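
ocfs2_max_inline_data_with_xattr() subtracts the inline xattr reservation from the space left for inline file data. A worked example with assumed numbers; the 192-byte offset of id2.i_data.id_data is purely illustrative, not taken from the on-disk format:

#include <stdio.h>

int main(void)
{
	int blocksize = 4096;
	int data_off = 192;		/* assumed offsetof(dinode, id2.i_data.id_data) */
	int xattr_inline_size = 256;	/* assumed di->i_xattr_inline_size */

	printf("without inline xattrs: %d bytes\n", blocksize - data_off);
	printf("with inline xattrs:    %d bytes\n",
	       blocksize - data_off - xattr_inline_size);
	return 0;
}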
index 74db2be..8ae65c9 100644 (file)
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
                if (map_end > end)
                        map_end = end;
 
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
                page = grab_cache_page(mapping, page_index);
 
-               /* This page can't be dirtied before we CoW it out. */
-               BUG_ON(PageDirty(page));
+               /*
+                * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+                * can't be dirtied before we CoW it out.
+                */
+               if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+                       BUG_ON(PageDirty(page));
 
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
-               map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+               map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
                if (map_end > end)
                        map_end = end;
 
index e49c410..3038c92 100644 (file)
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        u32 dlm_key;
        struct dlm_ctxt *dlm;
        struct o2dlm_private *priv;
-       struct dlm_protocol_version dlm_version;
+       struct dlm_protocol_version fs_version;
 
        BUG_ON(conn == NULL);
        BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        /* used by the dlm code to make message headers unique, each
         * node in this domain must agree on this. */
        dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
-       dlm_version.pv_major = conn->cc_version.pv_major;
-       dlm_version.pv_minor = conn->cc_version.pv_minor;
+       fs_version.pv_major = conn->cc_version.pv_major;
+       fs_version.pv_minor = conn->cc_version.pv_minor;
 
-       dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+       dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
        if (IS_ERR(dlm)) {
                rc = PTR_ERR(dlm);
                mlog_errno(rc);
                goto out_free;
        }
 
-       conn->cc_version.pv_major = dlm_version.pv_major;
-       conn->cc_version.pv_minor = dlm_version.pv_minor;
+       conn->cc_version.pv_major = fs_version.pv_major;
+       conn->cc_version.pv_minor = fs_version.pv_minor;
        conn->cc_lockspace = dlm;
 
        dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
index 2606991..755cd49 100644 (file)
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                                     "file system, but write access is "
                                     "unavailable.\n");
                        else
-                               mlog_errno(status);                     
+                               mlog_errno(status);
                        goto read_super_error;
                }
 
index 49b133c..32499d2 100644 (file)
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        memcpy(link, target, len);
-       nd_set_link(nd, link);
 
 bail:
+       nd_set_link(nd, status ? ERR_PTR(status) : link);
        brelse(bh);
 
        mlog_exit(status);
-       return status ? ERR_PTR(status) : link;
+       return NULL;
 }
 
 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-       char *link = cookie;
-
-       kfree(link);
+       char *link = nd_get_link(nd);
+       if (!IS_ERR(link))
+               kfree(link);
 }
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
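
ocfs2_fast_follow_link() now publishes its result, whether an error or the allocated buffer, through nd_set_link() and returns NULL, and the matching put_link frees only a real string retrieved via nd_get_link(). A generic sketch of that 2.6-era follow_link/put_link pairing; the toy_* names are invented and the kstrdup() body is only a placeholder target builder:

static char *toy_build_target(struct dentry *dentry)
{
	/* placeholder: real code would read the link target from disk */
	return kstrdup((const char *)dentry->d_name.name, GFP_KERNEL);
}

static void *toy_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *link = toy_build_target(dentry);

	if (!link)
		link = ERR_PTR(-ENOMEM);
	nd_set_link(nd, link);		/* string or ERR_PTR, put_link sees it */
	return NULL;			/* no cookie needed */
}

static void toy_put_link(struct dentry *dentry, struct nameidata *nd,
			 void *cookie)
{
	char *link = nd_get_link(nd);

	if (!IS_ERR(link))
		kfree(link);
}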
index c613693..a0a120e 100644 (file)
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache. 
- * 
+ * the block is stored in our inode metadata cache.
+ *
  * This can be called under lock_buffer()
  */
 int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
index 040cef7..e0b2d88 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -271,7 +271,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
         * Make sure that there are no leases.  get_write_access() protects
         * against the truncate racing with a lease-granting setlease().
         */
-       error = break_lease(inode, FMODE_WRITE);
+       error = break_lease(inode, O_WRONLY);
        if (error)
                goto put_write_and_out;
 
index 8d5f392..5cc564a 100644 (file)
@@ -86,7 +86,7 @@ static int do_make_slave(struct vfsmount *mnt)
 
        /*
         * slave 'mnt' to a peer mount that has the
-        * same root dentry. If none is available than
+        * same root dentry. If none is available then
         * slave it to anything that is available.
         */
        while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
@@ -147,6 +147,11 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
  * get the next mount in the propagation tree.
  * @m: the mount seen last
  * @origin: the original mount from where the tree walk initiated
+ *
+ * Note that peer groups form contiguous segments of slave lists.
+ * We rely on that in get_source() to be able to find out if
+ * vfsmount found while iterating with propagation_next() is
+ * a peer of one we'd found earlier.
  */
 static struct vfsmount *propagation_next(struct vfsmount *m,
                                         struct vfsmount *origin)
@@ -186,10 +191,6 @@ static struct vfsmount *get_source(struct vfsmount *dest,
 {
        struct vfsmount *p_last_src = NULL;
        struct vfsmount *p_last_dest = NULL;
-       *type = CL_PROPAGATION;
-
-       if (IS_MNT_SHARED(dest))
-               *type |= CL_MAKE_SHARED;
 
        while (last_dest != dest->mnt_master) {
                p_last_dest = last_dest;
@@ -202,13 +203,18 @@ static struct vfsmount *get_source(struct vfsmount *dest,
                do {
                        p_last_dest = next_peer(p_last_dest);
                } while (IS_MNT_NEW(p_last_dest));
+               /* is that a peer of the earlier? */
+               if (dest == p_last_dest) {
+                       *type = CL_MAKE_SHARED;
+                       return p_last_src;
+               }
        }
-
-       if (dest != p_last_dest) {
-               *type |= CL_SLAVE;
-               return last_src;
-       } else
-               return p_last_src;
+       /* slave of the earlier, then */
+       *type = CL_SLAVE;
+       /* beginning of peer group among the slaves? */
+       if (IS_MNT_SHARED(dest))
+               *type |= CL_MAKE_SHARED;
+       return last_src;
 }
 
 /*
index 958665d..1ea4ae1 100644 (file)
 #define CL_SLAVE               0x02
 #define CL_COPY_ALL            0x04
 #define CL_MAKE_SHARED                 0x08
-#define CL_PROPAGATION                 0x10
-#define CL_PRIVATE             0x20
+#define CL_PRIVATE             0x10
 
 static inline void set_mnt_shared(struct vfsmount *mnt)
 {
-       mnt->mnt_flags &= ~MNT_PNODE_MASK;
+       mnt->mnt_flags &= ~MNT_SHARED_MASK;
        mnt->mnt_flags |= MNT_SHARED;
 }
 
index e42bbd8..746895d 100644 (file)
@@ -647,17 +647,11 @@ static int mounts_release(struct inode *inode, struct file *file)
 static unsigned mounts_poll(struct file *file, poll_table *wait)
 {
        struct proc_mounts *p = file->private_data;
-       struct mnt_namespace *ns = p->ns;
        unsigned res = POLLIN | POLLRDNORM;
 
-       poll_wait(file, &ns->poll, wait);
-
-       spin_lock(&vfsmount_lock);
-       if (p->event != ns->event) {
-               p->event = ns->event;
+       poll_wait(file, &p->ns->poll, wait);
+       if (mnt_had_events(p))
                res |= POLLERR | POLLPRI;
-       }
-       spin_unlock(&vfsmount_lock);
 
        return res;
 }
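
mounts_poll() now defers to mnt_had_events(), which is not shown in this hunk; presumably it performs the same compare-and-update of p->event against ns->event under vfsmount_lock that the removed inline code did. A sketch of that assumed helper:

/* Assumed shape only; the real definition lives outside this diff. */
int mnt_had_events(struct proc_mounts *p)
{
	struct mnt_namespace *ns = p->ns;
	int res = 0;

	spin_lock(&vfsmount_lock);
	if (p->event != ns->event) {
		p->event = ns->event;
		res = 1;
	}
	spin_unlock(&vfsmount_lock);

	return res;
}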
@@ -2369,16 +2363,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct pid_namespace *ns = dentry->d_sb->s_fs_info;
        pid_t tgid = task_tgid_nr_ns(current, ns);
-       char tmp[PROC_NUMBUF];
-       if (!tgid)
-               return ERR_PTR(-ENOENT);
-       sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
-       return ERR_PTR(vfs_follow_link(nd,tmp));
+       char *name = ERR_PTR(-ENOENT);
+       if (tgid) {
+               name = __getname();
+               if (!name)
+                       name = ERR_PTR(-ENOMEM);
+               else
+                       sprintf(name, "%d", tgid);
+       }
+       nd_set_link(nd, name);
+       return NULL;
+}
+
+static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+                               void *cookie)
+{
+       char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               __putname(s);
 }
 
 static const struct inode_operations proc_self_inode_operations = {
        .readlink       = proc_self_readlink,
        .follow_link    = proc_self_follow_link,
+       .put_link       = proc_self_put_link,
 };
 
 /*
index 480cb10..9580abe 100644 (file)
@@ -662,6 +662,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
        }
        return ent;
 }
+EXPORT_SYMBOL(proc_symlink);
 
 struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
                struct proc_dir_entry *parent)
@@ -700,6 +701,7 @@ struct proc_dir_entry *proc_mkdir(const char *name,
 {
        return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
 }
+EXPORT_SYMBOL(proc_mkdir);
 
 struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
                                         struct proc_dir_entry *parent)
@@ -728,6 +730,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
        }
        return ent;
 }
+EXPORT_SYMBOL(create_proc_entry);
 
 struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
                                        struct proc_dir_entry *parent,
@@ -762,6 +765,7 @@ out_free:
 out:
        return NULL;
 }
+EXPORT_SYMBOL(proc_create_data);
 
 static void free_proc_entry(struct proc_dir_entry *de)
 {
@@ -853,3 +857,4 @@ continue_removing:
                        de->parent->name, de->name, de->subdir->name);
        pde_put(de);
 }
+EXPORT_SYMBOL(remove_proc_entry);
index b080b79..757c069 100644 (file)
@@ -220,9 +220,3 @@ void pid_ns_release_proc(struct pid_namespace *ns)
 {
        mntput(ns->proc_mnt);
 }
-
-EXPORT_SYMBOL(proc_symlink);
-EXPORT_SYMBOL(proc_mkdir);
-EXPORT_SYMBOL(create_proc_entry);
-EXPORT_SYMBOL(proc_create_data);
-EXPORT_SYMBOL(remove_proc_entry);
index 9087b10..2df0f5c 100644 (file)
@@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
 
        args.objectid = key->on_disk_key.k_objectid;
        args.dirid = key->on_disk_key.k_dir_id;
+       reiserfs_write_unlock(s);
        inode = iget5_locked(s, key->on_disk_key.k_objectid,
                             reiserfs_find_actor, reiserfs_init_locked_inode,
                             (void *)(&args));
+       reiserfs_write_lock(s);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
index aff046b..f35ac60 100644 (file)
@@ -568,7 +568,7 @@ out:
 int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 {
        int retval;
-       int remount_rw;
+       int remount_rw, remount_ro;
 
        if (sb->s_frozen != SB_UNFROZEN)
                return -EBUSY;
@@ -583,9 +583,12 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
        shrink_dcache_sb(sb);
        sync_filesystem(sb);
 
+       remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
+       remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
+
        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
-       if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
+       if (remount_ro) {
                if (force)
                        mark_files_ro(sb);
                else if (!fs_may_remount_ro(sb))
@@ -594,7 +597,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
                if (retval < 0 && retval != -ENOSYS)
                        return -EBUSY;
        }
-       remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
 
        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
@@ -604,6 +606,16 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        if (remount_rw)
                vfs_dq_quota_on_remount(sb);
+       /*
+        * Some filesystems modify their metadata via some other path than the
+        * bdev buffer cache (eg. use a private mapping, or directories in
+        * pagecache, etc). Also file data modifications go via their own
+        * mappings. So If we try to mount readonly then copy the filesystem
+        * from bdev, we could get stale data, so invalidate it to give a best
+        * effort at coherency.
+        */
+       if (remount_ro && sb->s_bdev)
+               invalidate_bdev(sb->s_bdev);
        return 0;
 }
 
@@ -925,6 +937,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
        if (!mnt)
                goto out;
 
+       if (flags & MS_KERNMOUNT)
+               mnt->mnt_flags = MNT_INTERNAL;
+
        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
index 220b758..6a06a1d 100644 (file)
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
                if (!sd_attrs)
                        return -ENOMEM;
                sd->s_iattr = sd_attrs;
-       } else {
-               /* attributes were changed at least once in past */
-               iattrs = &sd_attrs->ia_iattr;
-
-               if (ia_valid & ATTR_UID)
-                       iattrs->ia_uid = iattr->ia_uid;
-               if (ia_valid & ATTR_GID)
-                       iattrs->ia_gid = iattr->ia_gid;
-               if (ia_valid & ATTR_ATIME)
-                       iattrs->ia_atime = iattr->ia_atime;
-               if (ia_valid & ATTR_MTIME)
-                       iattrs->ia_mtime = iattr->ia_mtime;
-               if (ia_valid & ATTR_CTIME)
-                       iattrs->ia_ctime = iattr->ia_ctime;
-               if (ia_valid & ATTR_MODE) {
-                       umode_t mode = iattr->ia_mode;
-                       iattrs->ia_mode = sd->s_mode = mode;
-               }
+       }
+       /* attributes were changed at least once in past */
+       iattrs = &sd_attrs->ia_iattr;
+
+       if (ia_valid & ATTR_UID)
+               iattrs->ia_uid = iattr->ia_uid;
+       if (ia_valid & ATTR_GID)
+               iattrs->ia_gid = iattr->ia_gid;
+       if (ia_valid & ATTR_ATIME)
+               iattrs->ia_atime = iattr->ia_atime;
+       if (ia_valid & ATTR_MTIME)
+               iattrs->ia_mtime = iattr->ia_mtime;
+       if (ia_valid & ATTR_CTIME)
+               iattrs->ia_ctime = iattr->ia_ctime;
+       if (ia_valid & ATTR_MODE) {
+               umode_t mode = iattr->ia_mode;
+               iattrs->ia_mode = sd->s_mode = mode;
        }
        return 0;
 }
index 82372e3..b2d96f4 100644 (file)
@@ -547,7 +547,7 @@ static void udf_table_free_blocks(struct super_block *sb,
                }
 
                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
-                       char *sptr, *dptr;
+                       unsigned char *sptr, *dptr;
                        int loffset;
 
                        brelse(oepos.bh);
index 61d9a76..f0f2a43 100644 (file)
@@ -45,8 +45,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
        int block, iblock;
        loff_t nf_pos = (filp->f_pos - 1) << 2;
        int flen;
-       char *fname = NULL;
-       char *nameptr;
+       unsigned char *fname = NULL;
+       unsigned char *nameptr;
        uint16_t liu;
        uint8_t lfi;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
index f90231e..378a759 100644 (file)
@@ -1672,7 +1672,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
                return -1;
 
        if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
-               char *sptr, *dptr;
+               unsigned char *sptr, *dptr;
                struct buffer_head *nbh;
                int err, loffset;
                struct kernel_lb_addr obloc = epos->block;
index cd21150..7c56ff0 100644 (file)
@@ -34,8 +34,8 @@
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
-static inline int udf_match(int len1, const char *name1, int len2,
-                           const char *name2)
+static inline int udf_match(int len1, const unsigned char *name1, int len2,
+                           const unsigned char *name2)
 {
        if (len1 != len2)
                return 0;
@@ -142,15 +142,15 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 }
 
 static struct fileIdentDesc *udf_find_entry(struct inode *dir,
-                                           struct qstr *child,
+                                           const struct qstr *child,
                                            struct udf_fileident_bh *fibh,
                                            struct fileIdentDesc *cfi)
 {
        struct fileIdentDesc *fi = NULL;
        loff_t f_pos;
        int block, flen;
-       char *fname = NULL;
-       char *nameptr;
+       unsigned char *fname = NULL;
+       unsigned char *nameptr;
        uint8_t lfi;
        uint16_t liu;
        loff_t size;
@@ -308,7 +308,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
 {
        struct super_block *sb = dir->i_sb;
        struct fileIdentDesc *fi = NULL;
-       char *name = NULL;
+       unsigned char *name = NULL;
        int namelen;
        loff_t f_pos;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
@@ -885,16 +885,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 {
        struct inode *inode;
        struct pathComponent *pc;
-       char *compstart;
+       const char *compstart;
        struct udf_fileident_bh fibh;
        struct extent_position epos = {};
        int eoffset, elen = 0;
        struct fileIdentDesc *fi;
        struct fileIdentDesc cfi;
-       char *ea;
+       uint8_t *ea;
        int err;
        int block;
-       char *name = NULL;
+       unsigned char *name = NULL;
        int namelen;
        struct buffer_head *bh;
        struct udf_inode_info *iinfo;
@@ -970,7 +970,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
                pc = (struct pathComponent *)(ea + elen);
 
-               compstart = (char *)symname;
+               compstart = symname;
 
                do {
                        symname++;
index c3265e1..852e918 100644 (file)
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
-static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen,
-                          char *to)
+static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
+                          int fromlen, unsigned char *to)
 {
        struct pathComponent *pc;
        int elen = 0;
-       char *p = to;
+       unsigned char *p = to;
 
        while (elen < fromlen) {
                pc = (struct pathComponent *)(from + elen);
@@ -75,9 +75,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct buffer_head *bh = NULL;
-       char *symlink;
+       unsigned char *symlink;
        int err = -EIO;
-       char *p = kmap(page);
+       unsigned char *p = kmap(page);
        struct udf_inode_info *iinfo;
 
        lock_kernel();
index 22af68f..317a0d4 100644 (file)
@@ -31,7 +31,7 @@
  * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
  */
 static inline int ufs_match(struct super_block *sb, int len,
-               const char * const name, struct ufs_dir_entry * de)
+               const unsigned char *name, struct ufs_dir_entry *de)
 {
        if (len != ufs_get_de_namlen(sb, de))
                return 0;
@@ -70,7 +70,7 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
        return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
 }
 
-ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
+ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 {
        ino_t res = 0;
        struct ufs_dir_entry *de;
@@ -249,11 +249,11 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
  * (as a parameter - res_dir). Page is returned mapped and unlocked.
  * Entry is guaranteed to be valid.
  */
-struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
+struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
                                     struct page **res_page)
 {
        struct super_block *sb = dir->i_sb;
-       const char *name = qstr->name;
+       const unsigned char *name = qstr->name;
        int namelen = qstr->len;
        unsigned reclen = UFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
@@ -313,7 +313,7 @@ found:
 int ufs_add_link(struct dentry *dentry, struct inode *inode)
 {
        struct inode *dir = dentry->d_parent->d_inode;
-       const char *name = dentry->d_name.name;
+       const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct super_block *sb = dir->i_sb;
        unsigned reclen = UFS_DIR_REC_LEN(namelen);
index 0b4c39b..01d0e2a 100644 (file)
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
 /* dir.c */
 extern const struct inode_operations ufs_dir_inode_operations;
 extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
+extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
 extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
+extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
 extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
 extern int ufs_empty_dir (struct inode *);
 extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
index 1e67c44..f745948 100644 (file)
@@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free {
 #define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 struct drm_nouveau_getparam {
        uint64_t param;
        uint64_t value;
index 2be7e12..c7645f4 100644 (file)
@@ -68,7 +68,8 @@
 #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
 #define DRM_VMW_PARAM_3D               2
 #define DRM_VMW_PARAM_FIFO_OFFSET      3
-
+#define DRM_VMW_PARAM_HW_CAPS          4
+#define DRM_VMW_PARAM_FIFO_CAPS        5
 
 /**
  * struct drm_vmw_getparam_arg
@@ -181,6 +182,8 @@ struct drm_vmw_context_arg {
  * The size of the array should equal the total number of mipmap levels.
  * @shareable: Boolean whether other clients (as identified by file descriptors)
  * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
  *
  * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
  * Output data from the DRM_VMW_REF_SURFACE Ioctl.
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req {
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        uint64_t size_addr;
        int32_t shareable;
-       uint32_t pad64;
+       int32_t scanout;
 };
 
 /**
@@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg {
  *
  * @commands: User-space address of a command buffer cast to an uint64_t.
  * @command-size: Size in bytes of the command buffer.
+ * @throttle-us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
  * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
  * uint64_t.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
  *
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
+#define DRM_VMW_EXECBUF_VERSION 0
+
 struct drm_vmw_execbuf_arg {
        uint64_t commands;
        uint32_t command_size;
-       uint32_t pad64;
+       uint32_t throttle_us;
        uint64_t fence_rep;
+       uint32_t version;
+       uint32_t flags;
 };
 
 /**
index ab94335..6816be6 100644 (file)
@@ -1,5 +1,9 @@
 /*
- *  linux/include/asm-arm/hardware/amba.h
+ *  linux/include/amba/bus.h
+ *
+ *  This device type deals with ARM PrimeCells and anything else that
+ *  presents a proper CID (0xB105F00D) at the end of the I/O register
+ *  region or that is derived from a PrimeCell.
  *
  *  Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
  *
index 5c80189..1896e86 100644 (file)
@@ -461,8 +461,7 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
-#define QUEUE_FLAG_CQ         16       /* hardware does queuing */
-#define QUEUE_FLAG_DISCARD     17      /* supports DISCARD */
+#define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_CLUSTER) |            \
@@ -586,7 +585,6 @@ enum {
 
 #define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q)   test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)    test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
index b1bcb27..5b3182c 100644 (file)
@@ -729,6 +729,7 @@ struct inode {
        uid_t                   i_uid;
        gid_t                   i_gid;
        dev_t                   i_rdev;
+       unsigned int            i_blkbits;
        u64                     i_version;
        loff_t                  i_size;
 #ifdef __NEED_I_SIZE_ORDERED
@@ -738,7 +739,6 @@ struct inode {
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        blkcnt_t                i_blocks;
-       unsigned int            i_blkbits;
        unsigned short          i_bytes;
        umode_t                 i_mode;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
@@ -1305,6 +1305,8 @@ extern int send_sigurg(struct fown_struct *fown);
 #define MNT_FORCE      0x00000001      /* Attempt to forcibily umount */
 #define MNT_DETACH     0x00000002      /* Just detach from the tree */
 #define MNT_EXPIRE     0x00000004      /* Mark for expiry */
+#define UMOUNT_NOFOLLOW        0x00000008      /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED  0x80000000      /* Flag guaranteed to be unused */
 
 extern struct list_head super_blocks;
 extern spinlock_t sb_lock;
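The new UMOUNT_NOFOLLOW flag above is reachable from userspace through umount2(); a minimal sketch, assuming libc headers that do not yet carry the flag, so it is defined locally with the value introduced here:

#include <stdio.h>
#include <sys/mount.h>

#ifndef UMOUNT_NOFOLLOW
#define UMOUNT_NOFOLLOW 0x00000008      /* value added above */
#endif

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }
        /* Refuse to follow a trailing symlink, so a user-controlled symlink
         * cannot redirect the umount to some other mount point. */
        if (umount2(argv[1], UMOUNT_NOFOLLOW) < 0) {
                perror("umount2");
                return 1;
        }
        return 0;
}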
@@ -1314,9 +1316,9 @@ extern spinlock_t sb_lock;
 struct super_block {
        struct list_head        s_list;         /* Keep this first */
        dev_t                   s_dev;          /* search index; _not_ kdev_t */
-       unsigned long           s_blocksize;
-       unsigned char           s_blocksize_bits;
        unsigned char           s_dirt;
+       unsigned char           s_blocksize_bits;
+       unsigned long           s_blocksize;
        loff_t                  s_maxbytes;     /* Max file size */
        struct file_system_type *s_type;
        const struct super_operations   *s_op;
@@ -1357,16 +1359,16 @@ struct super_block {
        void                    *s_fs_info;     /* Filesystem private info */
        fmode_t                 s_mode;
 
+       /* Granularity of c/m/atime in ns.
+          Cannot be worse than a second */
+       u32                s_time_gran;
+
        /*
         * The next field is for VFS *only*. No filesystems have any business
         * even looking at it. You had been warned.
         */
        struct mutex s_vfs_rename_mutex;        /* Kludge */
 
-       /* Granularity of c/m/atime in ns.
-          Cannot be worse than a second */
-       u32                s_time_gran;
-
        /*
         * Filesystem subtype.  If non-empty the filesystem type field
         * in /proc/mounts will be "type.subtype"
@@ -1794,7 +1796,8 @@ extern int may_umount(struct vfsmount *);
 extern long do_mount(char *, char *, char *, unsigned long, void *);
 extern struct vfsmount *collect_mounts(struct path *);
 extern void drop_collected_mounts(struct vfsmount *);
-
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+                         struct vfsmount *);
 extern int vfs_statfs(struct dentry *, struct kstatfs *);
 
 extern int current_umask(void);
@@ -2058,12 +2061,6 @@ extern int invalidate_inodes(struct super_block *);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        pgoff_t start, pgoff_t end);
 
-static inline unsigned long __deprecated
-invalidate_inode_pages(struct address_space *mapping)
-{
-       return invalidate_mapping_pages(mapping, 0, ~0UL);
-}
-
 static inline void invalidate_remote_inode(struct inode *inode)
 {
        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -2132,6 +2129,7 @@ extern struct file * open_exec(const char *);
  
 /* fs/dcache.c -- generic fs support functions */
 extern int is_subdir(struct dentry *, struct dentry *);
+extern int path_is_under(struct path *, struct path *);
 extern ino_t find_inode_number(struct dentry *, struct qstr *);
 
 #include <linux/err.h>
@@ -2340,8 +2338,6 @@ extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct
 extern int simple_sync_file(struct file *, struct dentry *, int);
 extern int simple_empty(struct dentry *);
 extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_prepare_write(struct file *file, struct page *page,
-                       unsigned offset, unsigned to);
 extern int simple_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata);
index 070ba06..5977b72 100644 (file)
@@ -44,7 +44,7 @@ static inline int hw_breakpoint_type(struct perf_event *bp)
        return bp->attr.bp_type;
 }
 
-static inline int hw_breakpoint_len(struct perf_event *bp)
+static inline unsigned long hw_breakpoint_len(struct perf_event *bp)
 {
        return bp->attr.bp_len;
 }
index 735ceaf..663208a 100644 (file)
@@ -376,6 +376,7 @@ struct input_absinfo {
 #define KEY_DISPLAY_OFF                245     /* display device to off state */
 
 #define KEY_WIMAX              246
+#define KEY_RFKILL             247     /* Key that controls all radios */
 
 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */
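The new keycode surfaces as an ordinary EV_KEY event; an illustrative reader (the device node is a placeholder, and the constant is defined locally in case installed headers predate it):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

#ifndef KEY_RFKILL
#define KEY_RFKILL 247          /* value added above */
#endif

int main(void)
{
        struct input_event ev;
        int fd = open("/dev/input/event0", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                if (ev.type == EV_KEY && ev.code == KEY_RFKILL && ev.value == 1)
                        printf("rfkill key pressed\n");
        }
        close(fd);
        return 0;
}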
 
index 6f6c5f3..bc0fc79 100644 (file)
@@ -124,7 +124,7 @@ extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
  */
 static inline bool kfifo_initialized(struct kfifo *fifo)
 {
-       return fifo->buffer != 0;
+       return fifo->buffer != NULL;
 }
 
 /**
index d74785c..0b89efc 100644 (file)
@@ -35,6 +35,7 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
 extern const struct seq_operations mounts_op;
 extern const struct seq_operations mountinfo_op;
 extern const struct seq_operations mountstats_op;
+extern int mnt_had_events(struct proc_mounts *);
 
 #endif
 #endif
index 5d52753..ca726eb 100644 (file)
@@ -34,7 +34,18 @@ struct mnt_namespace;
 
 #define MNT_SHARED     0x1000  /* if the vfsmount is a shared mount */
 #define MNT_UNBINDABLE 0x2000  /* if the vfsmount is a unbindable mount */
-#define MNT_PNODE_MASK 0x3000  /* propagation flag mask */
+/*
+ * MNT_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared.  Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount.  If you add a new MNT_*
+ * flag, consider how it interacts with shared mounts.
+ */
+#define MNT_SHARED_MASK        (MNT_UNBINDABLE)
+#define MNT_PROPAGATION_MASK   (MNT_SHARED | MNT_UNBINDABLE)
+
+
+#define MNT_INTERNAL   0x4000
 
 struct vfsmount {
        struct list_head mnt_hash;
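For orientation only: the propagation states the MNT_SHARED_MASK comment refers to are driven from userspace with the long-standing MS_* flags to mount(2). A hedged sketch, with the established uapi values guarded in case older libc headers lack them:

#include <stdio.h>
#include <sys/mount.h>

#ifndef MS_UNBINDABLE
#define MS_UNBINDABLE  (1 << 17)
#endif
#ifndef MS_SHARED
#define MS_SHARED      (1 << 20)
#endif

int main(void)
{
        /* Make an existing mount shared, so it propagates mount events... */
        if (mount(NULL, "/mnt/a", NULL, MS_SHARED, NULL) < 0)
                perror("MS_SHARED /mnt/a");

        /* ...or unbindable, the bit that MNT_SHARED_MASK clears again when
         * the mount later becomes shared. */
        if (mount(NULL, "/mnt/b", NULL, MS_UNBINDABLE, NULL) < 0)
                perror("MS_UNBINDABLE /mnt/b");

        return 0;
}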
@@ -123,7 +134,6 @@ extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
 
 extern void mark_mounts_for_expiry(struct list_head *mounts);
 
-extern spinlock_t vfsmount_lock;
 extern dev_t name_to_dev_t(char *name);
 
 #endif /* _LINUX_MOUNT_H */
index 8fa7187..a177698 100644 (file)
@@ -211,11 +211,9 @@ struct perf_event_attr {
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };
 
-       __u32                   __reserved_2;
-
-       __u64                   bp_addr;
        __u32                   bp_type;
-       __u32                   bp_len;
+       __u64                   bp_addr;
+       __u64                   bp_len;
 };
 
 /*
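The bp_addr/bp_len widening is visible to perf_event_open() users; a hedged sketch of arming a hardware write watchpoint (raw syscall, since there is no libc wrapper; the HW_BREAKPOINT_* names come from linux/hw_breakpoint.h):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int watched;                     /* variable to watch for writes */

int main(void)
{
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.type    = PERF_TYPE_BREAKPOINT;
        attr.size    = sizeof(attr);
        attr.bp_type = HW_BREAKPOINT_W;
        attr.bp_addr = (unsigned long)&watched;  /* __u64, like bp_len now */
        attr.bp_len  = HW_BREAKPOINT_LEN_4;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        watched = 1;                    /* would trigger the breakpoint */
        return 0;
}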
index ba1ba0c..63d4498 100644 (file)
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
 struct netns_ct {
        atomic_t                count;
        unsigned int            expect_count;
+       unsigned int            htable_size;
+       struct kmem_cache       *nf_conntrack_cachep;
        struct hlist_nulls_head *hash;
        struct hlist_head       *expect_hash;
        struct hlist_nulls_head unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
 #endif
        int                     hash_vmalloc;
        int                     expect_vmalloc;
+       char                    *slabname;
 };
 #endif
index 2eb3814..9a4b8b7 100644 (file)
@@ -40,6 +40,7 @@ struct netns_ipv4 {
        struct xt_table         *iptable_security;
        struct xt_table         *nat_table;
        struct hlist_head       *nat_bysource;
+       unsigned int            nat_htable_size;
        int                     nat_vmalloced;
 #endif
 
index 614241b..2b10853 100644 (file)
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
        extern char * envp_init[];
 
        sys_close(old_fd);sys_close(root_fd);
-       sys_close(0);sys_close(1);sys_close(2);
        sys_setsid();
-       (void) sys_open("/dev/console",O_RDWR,0);
-       (void) sys_dup(0);
-       (void) sys_dup(0);
        return kernel_execve(shell, argv, envp_init);
 }
 
index 4cb47a1..106e02d 100644 (file)
@@ -806,11 +806,6 @@ static noinline int init_post(void)
        system_state = SYSTEM_RUNNING;
        numa_default_policy();
 
-       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-               printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
-       (void) sys_dup(0);
-       (void) sys_dup(0);
 
        current->signal->flags |= SIGNAL_UNKILLABLE;
 
@@ -873,6 +868,12 @@ static int __init kernel_init(void * unused)
 
        do_basic_setup();
 
+       /* Open the /dev/console on the rootfs, this should never fail */
+       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+               printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+       (void) sys_dup(0);
+       (void) sys_dup(0);
        /*
         * check if there is an early userspace init.  If yes, let it do all
         * the work
index c79bd57..b6cb064 100644 (file)
@@ -134,7 +134,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                        init_waitqueue_head(&info->wait_q);
                        INIT_LIST_HEAD(&info->e_wait_q[0].list);
                        INIT_LIST_HEAD(&info->e_wait_q[1].list);
-                       info->messages = NULL;
                        info->notify_owner = NULL;
                        info->qsize = 0;
                        info->user = NULL;      /* set when all is ok */
@@ -146,6 +145,10 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                                info->attr.mq_msgsize = attr->mq_msgsize;
                        }
                        mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
+                       info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
+                       if (!info->messages)
+                               goto out_inode;
+
                        mq_bytes = (mq_msg_tblsz +
                                (info->attr.mq_maxmsg * info->attr.mq_msgsize));
 
@@ -154,18 +157,12 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                            u->mq_bytes + mq_bytes >
                            p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
                                spin_unlock(&mq_lock);
+                               kfree(info->messages);
                                goto out_inode;
                        }
                        u->mq_bytes += mq_bytes;
                        spin_unlock(&mq_lock);
 
-                       info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
-                       if (!info->messages) {
-                               spin_lock(&mq_lock);
-                               u->mq_bytes -= mq_bytes;
-                               spin_unlock(&mq_lock);
-                               goto out_inode;
-                       }
                        /* all is ok */
                        info->user = get_uid(u);
                } else if (S_ISDIR(mode)) {
@@ -187,7 +184,7 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *inode;
        struct ipc_namespace *ns = data;
-       int error = 0;
+       int error;
 
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -205,7 +202,9 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
        if (!sb->s_root) {
                iput(inode);
                error = -ENOMEM;
+               goto out;
        }
+       error = 0;
 
 out:
        return error;
@@ -264,8 +263,9 @@ static void mqueue_delete_inode(struct inode *inode)
 
        clear_inode(inode);
 
-       mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
-                  (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+       /* Total amount of bytes accounted for the mqueue */
+       mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
+           + info->attr.mq_msgsize);
        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
@@ -604,8 +604,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
                return 0;
-       if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
-           (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
+       if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
+           + sizeof (struct msg_msg *))) <
            (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
                return 0;
        return 1;
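A userspace view of the attributes the rearranged overflow check guards, illustrative only (build with -lrt): mq_maxmsg and mq_msgsize together determine the per-queue accounting the kernel validates.

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
        struct mq_attr attr = {
                .mq_maxmsg  = 10,       /* accounted with one msg_msg pointer each */
                .mq_msgsize = 128,
        };
        mqd_t q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);

        if (q == (mqd_t)-1) {
                perror("mq_open");
                return 1;
        }
        mq_close(q);
        mq_unlink("/demo");
        return 0;
}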
@@ -623,9 +623,10 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
        int ret;
 
        if (attr) {
-               ret = -EINVAL;
-               if (!mq_attr_ok(ipc_ns, attr))
+               if (!mq_attr_ok(ipc_ns, attr)) {
+                       ret = -EINVAL;
                        goto out;
+               }
                /* store for use during create */
                dentry->d_fsdata = attr;
        }
@@ -659,24 +660,28 @@ out:
 static struct file *do_open(struct ipc_namespace *ipc_ns,
                                struct dentry *dentry, int oflag)
 {
+       int ret;
        const struct cred *cred = current_cred();
 
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
 
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
-               dput(dentry);
-               mntput(ipc_ns->mq_mnt);
-               return ERR_PTR(-EINVAL);
+               ret = -EINVAL;
+               goto err;
        }
 
        if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
-               dput(dentry);
-               mntput(ipc_ns->mq_mnt);
-               return ERR_PTR(-EACCES);
+               ret = -EACCES;
+               goto err;
        }
 
        return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
+
+err:
+       dput(dentry);
+       mntput(ipc_ns->mq_mnt);
+       return ERR_PTR(ret);
 }
 
 SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
@@ -705,16 +710,17 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
        dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                error = PTR_ERR(dentry);
-               goto out_err;
+               goto out_putfd;
        }
        mntget(ipc_ns->mq_mnt);
 
        if (oflag & O_CREAT) {
                if (dentry->d_inode) {  /* entry already exists */
                        audit_inode(name, dentry);
-                       error = -EEXIST;
-                       if (oflag & O_EXCL)
+                       if (oflag & O_EXCL) {
+                               error = -EEXIST;
                                goto out;
+                       }
                        filp = do_open(ipc_ns, dentry, oflag);
                } else {
                        filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
@@ -722,9 +728,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
                                                u_attr ? &attr : NULL);
                }
        } else {
-               error = -ENOENT;
-               if (!dentry->d_inode)
+               if (!dentry->d_inode) {
+                       error = -ENOENT;
                        goto out;
+               }
                audit_inode(name, dentry);
                filp = do_open(ipc_ns, dentry, oflag);
        }
@@ -742,7 +749,6 @@ out:
        mntput(ipc_ns->mq_mnt);
 out_putfd:
        put_unused_fd(fd);
-out_err:
        fd = error;
 out_upsem:
        mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
@@ -872,19 +878,24 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
        timeout = prepare_timeout(p);
 
-       ret = -EBADF;
        filp = fget(mqdes);
-       if (unlikely(!filp))
+       if (unlikely(!filp)) {
+               ret = -EBADF;
                goto out;
+       }
 
        inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations))
+       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+               ret = -EBADF;
                goto out_fput;
+       }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);
 
-       if (unlikely(!(filp->f_mode & FMODE_WRITE)))
+       if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
+               ret = -EBADF;
                goto out_fput;
+       }
 
        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
@@ -961,19 +972,24 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
        audit_mq_sendrecv(mqdes, msg_len, 0, p);
        timeout = prepare_timeout(p);
 
-       ret = -EBADF;
        filp = fget(mqdes);
-       if (unlikely(!filp))
+       if (unlikely(!filp)) {
+               ret = -EBADF;
                goto out;
+       }
 
        inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations))
+       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+               ret = -EBADF;
                goto out_fput;
+       }
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);
 
-       if (unlikely(!(filp->f_mode & FMODE_READ)))
+       if (unlikely(!(filp->f_mode & FMODE_READ))) {
+               ret = -EBADF;
                goto out_fput;
+       }
 
        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
@@ -1063,13 +1079,14 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
 
                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
-                       ret = -ENOMEM;
-                       if (!nc)
+                       if (!nc) {
+                               ret = -ENOMEM;
                                goto out;
-                       ret = -EFAULT;
+                       }
                        if (copy_from_user(nc->data,
                                        notification.sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
+                               ret = -EFAULT;
                                goto out;
                        }
 
@@ -1078,9 +1095,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                        /* and attach it to the socket */
 retry:
                        filp = fget(notification.sigev_signo);
-                       ret = -EBADF;
-                       if (!filp)
+                       if (!filp) {
+                               ret = -EBADF;
                                goto out;
+                       }
                        sock = netlink_getsockbyfilp(filp);
                        fput(filp);
                        if (IS_ERR(sock)) {
@@ -1092,7 +1110,7 @@ retry:
                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
-                               goto retry;
+                               goto retry;
                        if (ret) {
                                sock = NULL;
                                nc = NULL;
@@ -1101,14 +1119,17 @@ retry:
                }
        }
 
-       ret = -EBADF;
        filp = fget(mqdes);
-       if (!filp)
+       if (!filp) {
+               ret = -EBADF;
                goto out;
+       }
 
        inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations))
+       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+               ret = -EBADF;
                goto out_fput;
+       }
        info = MQUEUE_I(inode);
 
        ret = 0;
@@ -1171,14 +1192,17 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                        return -EINVAL;
        }
 
-       ret = -EBADF;
        filp = fget(mqdes);
-       if (!filp)
+       if (!filp) {
+               ret = -EBADF;
                goto out;
+       }
 
        inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations))
+       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+               ret = -EBADF;
                goto out_fput;
+       }
        info = MQUEUE_I(inode);
 
        spin_lock(&info->lock);
@@ -1272,7 +1296,7 @@ static int __init init_mqueue_fs(void)
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;
 
-       /* ignore failues - they are not fatal */
+       /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();
 
        error = register_filesystem(&mqueue_fs_type);
index 4b05bd9..028e856 100644 (file)
@@ -548,6 +548,11 @@ int audit_remove_tree_rule(struct audit_krule *rule)
        return 0;
 }
 
+static int compare_root(struct vfsmount *mnt, void *arg)
+{
+       return mnt->mnt_root->d_inode == arg;
+}
+
 void audit_trim_trees(void)
 {
        struct list_head cursor;
@@ -559,7 +564,6 @@ void audit_trim_trees(void)
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
-               struct list_head list;
                int err;
 
                tree = container_of(cursor.next, struct audit_tree, list);
@@ -577,24 +581,16 @@ void audit_trim_trees(void)
                if (!root_mnt)
                        goto skip_it;
 
-               list_add_tail(&list, &root_mnt->mnt_list);
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
-                       struct audit_chunk *chunk = find_chunk(node);
-                       struct inode *inode = chunk->watch.inode;
-                       struct vfsmount *mnt;
+                       struct inode *inode = find_chunk(node)->watch.inode;
                        node->index |= 1U<<31;
-                       list_for_each_entry(mnt, &list, mnt_list) {
-                               if (mnt->mnt_root->d_inode == inode) {
-                                       node->index &= ~(1U<<31);
-                                       break;
-                               }
-                       }
+                       if (iterate_mounts(compare_root, inode, root_mnt))
+                               node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
-               list_del_init(&list);
                drop_collected_mounts(root_mnt);
 skip_it:
                mutex_lock(&audit_filter_mutex);
@@ -603,22 +599,6 @@ skip_it:
        mutex_unlock(&audit_filter_mutex);
 }
 
-static int is_under(struct vfsmount *mnt, struct dentry *dentry,
-                   struct path *path)
-{
-       if (mnt != path->mnt) {
-               for (;;) {
-                       if (mnt->mnt_parent == mnt)
-                               return 0;
-                       if (mnt->mnt_parent == path->mnt)
-                                       break;
-                       mnt = mnt->mnt_parent;
-               }
-               dentry = mnt->mnt_mountpoint;
-       }
-       return is_subdir(dentry, path->dentry);
-}
-
 int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 {
 
@@ -638,13 +618,17 @@ void audit_put_tree(struct audit_tree *tree)
        put_tree(tree);
 }
 
+static int tag_mount(struct vfsmount *mnt, void *arg)
+{
+       return tag_chunk(mnt->mnt_root->d_inode, arg);
+}
+
 /* called with audit_filter_mutex */
 int audit_add_tree_rule(struct audit_krule *rule)
 {
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
-       struct vfsmount *mnt, *p;
-       struct list_head list;
+       struct vfsmount *mnt;
        int err;
 
        list_for_each_entry(tree, &tree_list, list) {
@@ -670,16 +654,9 @@ int audit_add_tree_rule(struct audit_krule *rule)
                err = -ENOMEM;
                goto Err;
        }
-       list_add_tail(&list, &mnt->mnt_list);
 
        get_tree(tree);
-       list_for_each_entry(p, &list, mnt_list) {
-               err = tag_chunk(p->mnt_root->d_inode, tree);
-               if (err)
-                       break;
-       }
-
-       list_del(&list);
+       err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);
 
        if (!err) {
@@ -714,31 +691,23 @@ int audit_tag_tree(char *old, char *new)
 {
        struct list_head cursor, barrier;
        int failed = 0;
-       struct path path;
+       struct path path1, path2;
        struct vfsmount *tagged;
-       struct list_head list;
-       struct vfsmount *mnt;
-       struct dentry *dentry;
        int err;
 
-       err = kern_path(new, 0, &path);
+       err = kern_path(new, 0, &path2);
        if (err)
                return err;
-       tagged = collect_mounts(&path);
-       path_put(&path);
+       tagged = collect_mounts(&path2);
+       path_put(&path2);
        if (!tagged)
                return -ENOMEM;
 
-       err = kern_path(old, 0, &path);
+       err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }
-       mnt = mntget(path.mnt);
-       dentry = dget(path.dentry);
-       path_put(&path);
-
-       list_add_tail(&list, &tagged->mnt_list);
 
        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
@@ -746,7 +715,7 @@ int audit_tag_tree(char *old, char *new)
 
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
-               struct vfsmount *p;
+               int good_one = 0;
 
                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
@@ -754,30 +723,19 @@ int audit_tag_tree(char *old, char *new)
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);
 
-               err = kern_path(tree->pathname, 0, &path);
-               if (err) {
-                       put_tree(tree);
-                       mutex_lock(&audit_filter_mutex);
-                       continue;
+               err = kern_path(tree->pathname, 0, &path2);
+               if (!err) {
+                       good_one = path_is_under(&path1, &path2);
+                       path_put(&path2);
                }
 
-               spin_lock(&vfsmount_lock);
-               if (!is_under(mnt, dentry, &path)) {
-                       spin_unlock(&vfsmount_lock);
-                       path_put(&path);
+               if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }
-               spin_unlock(&vfsmount_lock);
-               path_put(&path);
-
-               list_for_each_entry(p, &list, mnt_list) {
-                       failed = tag_chunk(p->mnt_root->d_inode, tree);
-                       if (failed)
-                               break;
-               }
 
+               failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
@@ -818,10 +776,8 @@ int audit_tag_tree(char *old, char *new)
        }
        list_del(&barrier);
        list_del(&cursor);
-       list_del(&list);
        mutex_unlock(&audit_filter_mutex);
-       dput(dentry);
-       mntput(mnt);
+       path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
 }
index 8a5c7d5..967e661 100644 (file)
@@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
        u64 old_addr = bp->attr.bp_addr;
+       u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
-       int old_len = bp->attr.bp_len;
        int err = 0;
 
        perf_event_disable(bp);
index 498cabb..35edbe2 100644 (file)
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
 
        buffer = kmalloc(size, gfp_mask);
        if (!buffer) {
-               _kfifo_init(fifo, 0, 0);
+               _kfifo_init(fifo, NULL, 0);
                return -ENOMEM;
        }
 
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
 void kfifo_free(struct kfifo *fifo)
 {
        kfree(fifo->buffer);
+       _kfifo_init(fifo, NULL, 0);
 }
 EXPORT_SYMBOL(kfifo_free);
 
index d27746b..2ae7409 100644 (file)
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
        task_event->event_id.tid = perf_event_tid(event, task);
        task_event->event_id.ptid = perf_event_tid(event, current);
 
-       task_event->event_id.time = perf_clock();
-
        perf_output_put(&handle, task_event->event_id);
 
        perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
-       if (event->state != PERF_EVENT_STATE_ACTIVE)
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_task_ctx(&cpuctx->ctx, task_event);
        if (!ctx)
-               ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+               ctx = rcu_dereference(current->perf_event_ctxp);
        if (ctx)
                perf_event_task_ctx(ctx, task_event);
        put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
                        /* .ppid */
                        /* .tid  */
                        /* .ptid */
+                       .time = perf_clock(),
                },
        };
 
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
-       if (event->state != PERF_EVENT_STATE_ACTIVE)
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
                                   struct perf_mmap_event *mmap_event)
 {
-       if (event->state != PERF_EVENT_STATE_ACTIVE)
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -4580,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (attr->type >= PERF_TYPE_MAX)
                return -EINVAL;
 
-       if (attr->__reserved_1 || attr->__reserved_2)
+       if (attr->__reserved_1)
                return -EINVAL;
 
        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
index a09502e..7c1a67e 100644 (file)
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
  */
 
 /*
- * The trampoline is called when the hrtimer expires. If this is
- * called from the hrtimer interrupt then we schedule the tasklet as
- * the timer callback function expects to run in softirq context. If
- * it's called in softirq context anyway (i.e. high resolution timers
- * disabled) then the hrtimer callback is called right away.
+ * The trampoline is called when the hrtimer expires. It schedules a tasklet
+ * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+ * hrtimer callback, but from softirq context.
  */
 static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
 {
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);
 
-       if (hrtimer_is_hres_active(timer)) {
-               tasklet_hi_schedule(&ttimer->tasklet);
-               return HRTIMER_NORESTART;
-       }
-       return ttimer->function(timer);
+       tasklet_hi_schedule(&ttimer->tasklet);
+       return HRTIMER_NORESTART;
 }
 
 /*
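A kernel-side sketch (assumed driver code, not part of this patch) of the tasklet_hrtimer API the new comment describes; after the change, the supplied callback always runs from the tasklet, i.e. softirq context:

#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer poll_timer;

/* Invoked via __tasklet_hrtimer_trampoline(), so always in softirq context. */
static enum hrtimer_restart poll_timer_fn(struct hrtimer *t)
{
        /* deferred work goes here */
        return HRTIMER_NORESTART;
}

static void poll_timer_setup(void)
{
        tasklet_hrtimer_init(&poll_timer, poll_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&poll_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
                              HRTIMER_MODE_REL);
}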
index 26a6b73..18bde97 100644 (file)
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
        if (which > PRIO_USER || which < PRIO_PROCESS)
                return -EINVAL;
 
+       rcu_read_lock();
        read_lock(&tasklist_lock);
        switch (which) {
                case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
        }
 out_unlock:
        read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        return retval;
 }
index 8f5d16e..8cd50d8 100644 (file)
@@ -1331,7 +1331,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
        ssize_t result;
        char *pathname;
        int flags;
-       int acc_mode, fmode;
+       int acc_mode;
 
        pathname = sysctl_getname(name, nlen, &table);
        result = PTR_ERR(pathname);
@@ -1342,15 +1342,12 @@ static ssize_t binary_sysctl(const int *name, int nlen,
        if (oldval && oldlen && newval && newlen) {
                flags = O_RDWR;
                acc_mode = MAY_READ | MAY_WRITE;
-               fmode = FMODE_READ | FMODE_WRITE;
        } else if (newval && newlen) {
                flags = O_WRONLY;
                acc_mode = MAY_WRITE;
-               fmode = FMODE_WRITE;
        } else if (oldval && oldlen) {
                flags = O_RDONLY;
                acc_mode = MAY_READ;
-               fmode = FMODE_READ;
        } else {
                result = 0;
                goto out_putname;
@@ -1361,7 +1358,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
        if (result)
                goto out_putname;
 
-       result = may_open(&nd.path, acc_mode, fmode);
+       result = may_open(&nd.path, acc_mode, flags);
        if (result)
                goto out_putpath;
 
index 7faaa32..e2ab064 100644 (file)
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
        *ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
index 6ea90c0..50b1b82 100644 (file)
@@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv)
                        return -EINVAL;
                }
                /* an address specified */
-               ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
+               ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
                if (ret) {
                        pr_info("Failed to parse address.\n");
                        return ret;
index 678a512..f4bc9b2 100644 (file)
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
        unsigned long val, flags;
        char buf[64];
        int ret;
+       int cpu;
 
        if (count >= sizeof(buf))
                return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
                return ret;
 
        local_irq_save(flags);
+
+       /*
+        * In case we trace inside arch_spin_lock() or after (NMI),
+        * we will cause circular lock, so we also need to increase
+        * the percpu trace_active here.
+        */
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)++;
+
        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);
+
+       per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);
 
        return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+       int cpu;
+
        local_irq_disable();
+
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)++;
+
        arch_spin_lock(&max_stack_lock);
 
        if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+       int cpu;
+
        arch_spin_unlock(&max_stack_lock);
+
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)--;
+
        local_irq_enable();
 }
 
index 1cac726..0dc7822 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
                        /* if already at the top layer, we need to grow */
-                       if (!(p = pa[l])) {
+                       if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }
+                       p = pa[l];
+                       BUG_ON(!p);
 
                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
index 698ea80..148b52a 100644 (file)
@@ -1117,7 +1117,7 @@ readpage:
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
-                                        * invalidate_inode_pages got it
+                                        * invalidate_mapping_pages got it
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
index 9a0db5b..880bd59 100644 (file)
@@ -1002,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 #define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
-       unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
-       int err;
 
-       for (i = 0; i < nr_pages; i += chunk_nr) {
-               if (chunk_nr > nr_pages - i)
-                       chunk_nr = nr_pages - i;
+       while (nr_pages) {
+               unsigned long chunk_nr;
 
-               err = copy_from_user(chunk_pages, &pages[i],
-                                    chunk_nr * sizeof(*chunk_pages));
-               if (err) {
-                       err = -EFAULT;
-                       goto out;
-               }
+               chunk_nr = nr_pages;
+               if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
+                       chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+
+               if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
+                       break;
 
                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
 
-               err = copy_to_user(&status[i], chunk_status,
-                                  chunk_nr * sizeof(*chunk_status));
-               if (err) {
-                       err = -EFAULT;
-                       goto out;
-               }
-       }
-       err = 0;
+               if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+                       break;
 
-out:
-       return err;
+               pages += chunk_nr;
+               status += chunk_nr;
+               nr_pages -= chunk_nr;
+       }
+       return nr_pages ? -EFAULT : 0;
 }
 
 /*
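do_pages_stat() backs the status-query mode of move_pages(2); a hedged userspace sketch (link with -lnuma, or issue the raw syscall) where a NULL nodes array asks only which node each page currently sits on:

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        void *buf;
        void *pages[4];
        int status[4];
        int i;

        if (posix_memalign(&buf, pagesz, 4 * pagesz))
                return 1;

        for (i = 0; i < 4; i++) {
                pages[i] = (char *)buf + i * pagesz;
                *(char *)pages[i] = 1;          /* fault the page in */
        }

        /* nodes == NULL: only report, in status[], the node of each page */
        if (move_pages(0, 4, pages, NULL, status, 0) < 0) {
                perror("move_pages");
                return 1;
        }
        for (i = 0; i < 4; i++)
                printf("page %d is on node %d\n", i, status[i]);

        free(buf);
        return 0;
}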
index f52481b..2370504 100644 (file)
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == p->mm)
                        continue;
+               if (mem && !task_in_mem_cgroup(c, mem))
+                       continue;
                if (!oom_kill_task(c))
                        return 0;
        }
index 8af95b2..09d4f1e 100644 (file)
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
 static int parse_opts(char *opts, struct p9_client *clnt)
 {
-       char *options;
+       char *options, *tmp_options;
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
        if (!opts)
                return 0;
 
-       options = kstrdup(opts, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(opts, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                        break;
                case Opt_trans:
                        clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+                       if(clnt->trans_mod == NULL) {
+                               P9_DPRINTK(P9_DEBUG_ERROR,
+                                  "Could not find request transport: %s\n",
+                                  (char *) &args[0]);
+                               ret = -EINVAL;
+                               goto free_and_return;
+                       }
                        break;
                case Opt_legacy:
                        clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                }
        }
 
-       kfree(options);
+free_and_return:
+       kfree(tmp_options);
        return ret;
 }
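The same pattern, reduced to plain userspace C, shows why a second pointer is kept around the strsep() loop above: strsep() advances the pointer it is handed, so freeing that pointer afterwards would leak or free mid-string.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse(const char *opts)
{
        char *tmp_options, *options, *p;

        tmp_options = strdup(opts);      /* keep this one for free() */
        if (!tmp_options)
                return -1;
        options = tmp_options;           /* strsep() consumes this copy */

        while ((p = strsep(&options, ",")) != NULL)
                printf("token: %s\n", p);

        free(tmp_options);               /* options may be NULL by now */
        return 0;
}

int main(void)
{
        return parse("port=564,trans=tcp,noextend");
}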
 
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
        clnt->trans = NULL;
        spin_lock_init(&clnt->lock);
        INIT_LIST_HEAD(&clnt->fidlist);
-       clnt->fidpool = p9_idpool_create();
-       if (IS_ERR(clnt->fidpool)) {
-               err = PTR_ERR(clnt->fidpool);
-               clnt->fidpool = NULL;
-               goto error;
-       }
 
        p9_tag_init(clnt);
 
        err = parse_opts(options, clnt);
        if (err < 0)
-               goto error;
+               goto free_client;
 
        if (!clnt->trans_mod)
                clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
                err = -EPROTONOSUPPORT;
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "No transport defined or default transport\n");
-               goto error;
+               goto free_client;
+       }
+
+       clnt->fidpool = p9_idpool_create();
+       if (IS_ERR(clnt->fidpool)) {
+               err = PTR_ERR(clnt->fidpool);
+               clnt->fidpool = NULL;
+               goto put_trans;
        }
 
        P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
        err = clnt->trans_mod->create(clnt, dev_name, options);
        if (err)
-               goto error;
+               goto destroy_fidpool;
 
        if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
                clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
 
        err = p9_client_version(clnt);
        if (err)
-               goto error;
+               goto close_trans;
 
        return clnt;
 
-error:
-       p9_client_destroy(clnt);
+close_trans:
+       clnt->trans_mod->close(clnt);
+destroy_fidpool:
+       p9_idpool_destroy(clnt->fidpool);
+put_trans:
+       v9fs_put_trans(clnt->trans_mod);
+free_client:
+       kfree(clnt);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_create);
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
 {
        int ret;
 
+       /* NOTE: size shouldn't include its own length */
        /* size[2] type[2] dev[4] qid[13] */
        /* mode[4] atime[4] mtime[4] length[8]*/
        /* name[s] uid[s] gid[s] muid[s] */
-       ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+       ret = 2+4+13+4+4+4+8+2+2+2+2;
 
        if (wst->name)
                ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
                wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
                wst->n_uid, wst->n_gid, wst->n_muid);
 
-       req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+       req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto error;
index be1cb90..31d0b05 100644 (file)
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
-       char *options;
+       char *options, *tmp_options;
        int ret;
 
        opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
        if (!params)
                return 0;
 
-       options = kstrdup(params, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(params, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
                        continue;
                }
        }
-       kfree(options);
+
+       kfree(tmp_options);
        return 0;
 }
 
index 65cb29d..2c95a89 100644 (file)
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
-       char *options;
+       char *options, *tmp_options;
        int ret;
 
        opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        if (!params)
                return 0;
 
-       options = kstrdup(params, GFP_KERNEL);
-       if (!options) {
+       tmp_options = kstrdup(params, GFP_KERNEL);
+       if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                           "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
+       options = tmp_options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
        }
        /* RQ must be at least as large as the SQ */
        opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
-       kfree(options);
+       kfree(tmp_options);
        return 0;
 }
 
index ea1e3da..cb50f4a 100644 (file)
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
        struct virtio_chan *chan = client->trans;
 
        mutex_lock(&virtio_9p_lock);
-       chan->inuse = false;
+       if (chan)
+               chan->inuse = false;
        mutex_unlock(&virtio_9p_lock);
 }
 
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
        }
 
        client->trans = (void *)chan;
+       client->status = Connected;
        chan->client = client;
 
        return 0;
index b7c4224..b10e3cd 100644 (file)
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+               acl->power_save = 1;
+               hci_conn_enter_active_mode(acl);
+
                if (lmp_esco_capable(hdev))
                        hci_setup_sync(sco, acl->handle);
                else
index 28517ba..592da5c 100644 (file)
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
                break;
 
        case 0x1c:      /* SCO interval rejected */
+       case 0x1a:      /* Unsupported Remote Feature */
        case 0x1f:      /* Unspecified error */
                if (conn->out && conn->attempt < 2) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
index 6cf526d..fc6ec1e 100644 (file)
@@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
 static int hidp_parse(struct hid_device *hid)
 {
        struct hidp_session *session = hid->driver_data;
-       struct hidp_connadd_req *req = session->req;
-       unsigned char *buf;
-       int ret;
-
-       buf = kmalloc(req->rd_size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       if (copy_from_user(buf, req->rd_data, req->rd_size)) {
-               kfree(buf);
-               return -EFAULT;
-       }
-
-       ret = hid_parse_report(session->hid, buf, req->rd_size);
-
-       kfree(buf);
-
-       if (ret)
-               return ret;
-
-       session->req = NULL;
 
-       return 0;
+       return hid_parse_report(session->hid, session->rd_data,
+                       session->rd_size);
 }
 
 static int hidp_start(struct hid_device *hid)
@@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
        bdaddr_t src, dst;
        int err;
 
+       session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+       if (!session->rd_data)
+               return -ENOMEM;
+
+       if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+               err = -EFAULT;
+               goto fault;
+       }
+       session->rd_size = req->rd_size;
+
        hid = hid_allocate_device();
-       if (IS_ERR(hid))
-               return PTR_ERR(hid);
+       if (IS_ERR(hid)) {
+               err = PTR_ERR(hid);
+               goto fault;
+       }
 
        session->hid = hid;
-       session->req = req;
+
        hid->driver_data = session;
 
        baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -806,6 +798,10 @@ failed:
        hid_destroy_device(hid);
        session->hid = NULL;
 
+fault:
+       kfree(session->rd_data);
+       session->rd_data = NULL;
+
        return err;
 }
 
@@ -900,6 +896,9 @@ unlink:
                session->hid = NULL;
        }
 
+       kfree(session->rd_data);
+       session->rd_data = NULL;
+
 purge:
        skb_queue_purge(&session->ctrl_transmit);
        skb_queue_purge(&session->intr_transmit);
index faf3d74..a4e215d 100644 (file)
@@ -154,7 +154,9 @@ struct hidp_session {
        struct sk_buff_head ctrl_transmit;
        struct sk_buff_head intr_transmit;
 
-       struct hidp_connadd_req *req;
+       /* Report descriptor */
+       __u8 *rd_data;
+       uint rd_size;
 };
 
 static inline void hidp_schedule(struct hidp_session *session)
index fc5ee32..89f4a59 100644 (file)
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
        BT_DBG("session %p state %ld", s, s->state);
 
        set_bit(RFCOMM_TIMED_OUT, &s->flags);
-       rfcomm_session_put(s);
        rfcomm_schedule(RFCOMM_SCHED_TIMEO);
 }
 
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        break;
 
                case BT_DISCONN:
-                       rfcomm_session_put(s);
+                       /* When socket is closed and we are not RFCOMM
+                        * initiator rfcomm_process_rx already calls
+                        * rfcomm_session_put() */
+                       if (s->sock->sk->sk_state != BT_CLOSED)
+                               rfcomm_session_put(s);
                        break;
                }
        }
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
                if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
                        s->state = BT_DISCONN;
                        rfcomm_send_disc(s, 0);
+                       rfcomm_session_put(s);
                        continue;
                }
 
index be9924f..ec87421 100644 (file)
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
-               skb->protocol = eth_type_trans(skb, napi->dev);
+               skb->protocol = eth_type_trans(skb, skb->dev);
 
                if (ret == GRO_HELD)
                        skb_gro_pull(skb, -ETH_HLEN);
index 57bc4d5..cb1b348 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <net/net_namespace.h>
+#include <linux/sched.h>
 
 #include <net/dst.h>
 
@@ -79,6 +80,7 @@ loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
+               cond_resched();
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
index d8aee58..236a998 100644 (file)
@@ -927,6 +927,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GPERMADDR:
        case ETHTOOL_GUFO:
        case ETHTOOL_GGSO:
+       case ETHTOOL_GGRO:
        case ETHTOOL_GFLAGS:
        case ETHTOOL_GPFLAGS:
        case ETHTOOL_GRXFH:
index fbc1c74..099c753 100644 (file)
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
        const struct iw_statistics *iw;
        ssize_t ret = -EINVAL;
 
-       rtnl_lock();
+       if (!rtnl_trylock())
+               return restart_syscall();
        if (dev_isalive(dev)) {
                iw = get_wireless_stats(dev);
                if (iw)
index de0c2c7..2e692af 100644 (file)
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
                        wait_event_interruptible_timeout(t->queue,
                                                         t->control != 0,
                                                         HZ/10);
+                       try_to_freeze();
                        continue;
                }
 
index 57dfb9c..ff16e9d 100644 (file)
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
        va_list args;
 
        va_start(args, fmt);
-       vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+       vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
        va_end(args);
 
        slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
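The bug fixed above is the classic sizeof-on-a-decayed-array mistake; a small userspace illustration (names are invented for the demo):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 32

static void format_name_bad(char *buf, const char *fmt, const char *arg)
{
        /* BUG: sizeof(buf) == sizeof(char *), so only a few chars ever fit */
        snprintf(buf, sizeof(buf), fmt, arg);
}

static void format_name_good(char *buf, const char *fmt, const char *arg)
{
        /* Pass the real capacity explicitly, as the CCID fix does */
        snprintf(buf, NAME_LEN, fmt, arg);
}

int main(void)
{
        char a[NAME_LEN], b[NAME_LEN];

        format_name_bad(a, "ccid%s_hc_rx", "3");
        format_name_good(b, "ccid%s_hc_rx", "3");
        printf("bad:  %s\ngood: %s\n", a, b);
        return 0;
}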
index 269958b..6df6f8a 100644 (file)
@@ -19,7 +19,9 @@
 #include <linux/list.h>
 #include <linux/module.h>
 
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX               255
+#define CCID_SLAB_NAME_LENGTH  32
 
 struct tcp_info;
 
@@ -49,8 +51,8 @@ struct ccid_operations {
        const char              *ccid_name;
        struct kmem_cache       *ccid_hc_rx_slab,
                                *ccid_hc_tx_slab;
-       char                    ccid_hc_rx_slab_name[32];
-       char                    ccid_hc_tx_slab_name[32];
+       char                    ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+       char                    ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
        __u32                   ccid_hc_rx_obj_size,
                                ccid_hc_tx_obj_size;
        /* Interface Routines */
index bace1d8..f5b3464 100644 (file)
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;
 
-       ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
-                                       "dccp");
+       try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
+                               "dccp");
        if (ret)
                goto err1;
 
index 040c4f0..26dec2b 100644 (file)
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
+       loff_t pos = *ppos;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write && *valp != val) {
                struct net *net = ctl->extra2;
 
                if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
-                       if (!rtnl_trylock())
+                       if (!rtnl_trylock()) {
+                               /* Restore the original values before restarting */
+                               *valp = val;
+                               *ppos = pos;
                                return restart_syscall();
+                       }
                        if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
                                inet_forward_change(net);
                        } else if (*valp) {
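
By the time rtnl_trylock() fails here, proc_dointvec() has already stored the new value and advanced *ppos, so the patch puts both back before restart_syscall(); the replayed write then behaves as if nothing had happened yet. The save/restore shape, reduced to a sketch (handler name hypothetical):

static int example_sysctl(ctl_table *ctl, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;		/* remember the old value */
	loff_t pos = *ppos;		/* remember the old file offset */
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != val) {
		if (!rtnl_trylock()) {
			*valp = val;	/* undo the value change ... */
			*ppos = pos;	/* ... and the offset advance */
			return restart_syscall();
		}
		/* ... propagate the new setting under RTNL ... */
		rtnl_unlock();
	}
	return ret;
}
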
index 76c0840..a42f658 100644 (file)
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
                break;
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
-       case IGMPV3_HOST_MEMBERSHIP_REPORT:
                /* Is it our report looped back? */
                if (skb_rtable(skb)->fl.iif == 0)
                        break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
                in_dev_put(in_dev);
                return pim_rcv_v1(skb);
 #endif
+       case IGMPV3_HOST_MEMBERSHIP_REPORT:
        case IGMP_DVMRP:
        case IGMP_TRACE:
        case IGMP_HOST_LEAVE_MESSAGE:
index 38fbf04..544ce08 100644 (file)
@@ -124,16 +124,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
        if (x->props.mode == XFRM_MODE_TUNNEL) {
                err = ipcomp_tunnel_attach(x);
                if (err)
-                       goto error_tunnel;
+                       goto out;
        }
 
        err = 0;
 out:
        return err;
-
-error_tunnel:
-       ipcomp_destroy(x);
-       goto out;
 }
 
 static const struct xfrm_type ipcomp_type = {
index 0663276..90203e1 100644 (file)
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct arpt_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(NFPROTO_ARP);
                        private = &tmp;
index 572330a..3ce53cf 100644 (file)
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct ipt_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET);
                        private = &tmp;
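
Both this hunk and the arptables one above (and the ip6tables one further down) hoist tmp out of the if (compat) block: the old code stored &tmp into private and kept dereferencing private after tmp's scope had ended. A stripped-down illustration of that lifetime bug, with hypothetical types:

struct info { unsigned int size; };

static unsigned int broken(int compat)
{
	static const struct info real = { .size = 128 };
	const struct info *private = &real;

	if (compat) {
		struct info tmp = { .size = 64 };
		private = &tmp;		/* tmp's storage ends at the brace */
	}
	return private->size;		/* may read a dead stack slot */
}
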
index d171b12..d1ea38a 100644 (file)
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
        },
        {
                .procname       = "ip_conntrack_buckets",
-               .data           = &nf_conntrack_htable_size,
+               .data           = &init_net.ct.htable_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
index 8668a3d..2fb7b76 100644 (file)
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        struct hlist_nulls_node *n;
 
        for (st->bucket = 0;
-            st->bucket < nf_conntrack_htable_size;
+            st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(net->ct.hash[st->bucket].first);
                if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        head = rcu_dereference(head->next);
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
-                       if (++st->bucket >= nf_conntrack_htable_size)
+                       if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(net->ct.hash[st->bucket].first);
index fe1a644..26066a2 100644 (file)
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                __read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
 
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
        hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all,
                            tuple->dst.protonum, 0);
-       return ((u64)hash * nf_nat_htable_size) >> 32;
+       return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
 {
-       unsigned int h = hash_by_src(tuple);
+       unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
        if (have_to_hash) {
                unsigned int srchash;
 
-               srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+               srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate extension area */
                nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 
 static int __net_init nf_nat_net_init(struct net *net)
 {
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-                                                     &net->ipv4.nat_vmalloced, 0);
+       /* Leave them the same for the moment. */
+       net->ipv4.nat_htable_size = net->ct.htable_size;
+       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+                                                      &net->ipv4.nat_vmalloced, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                            nf_nat_htable_size);
+                            net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
                return ret;
        }
 
-       /* Leave them the same for the moment. */
-       nf_nat_htable_size = nf_conntrack_htable_size;
-
        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;
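
hash_by_src() above keeps the multiply-and-shift reduction, now applied to the per-namespace table size: a 32-bit hash is mapped onto [0, size) without a modulo, and it works for sizes that are not powers of two. The reduction in isolation:

#include <linux/types.h>

static inline unsigned int bucket_of(u32 hash, unsigned int size)
{
	/* scale the full 32-bit range down to [0, size) */
	return ((u64)hash * size) >> 32;
}
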
index 28e0296..3fddc69 100644 (file)
@@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.
-                                * Fix it at least with timestamps.
+                                * Force it here.
                                 */
-                               if (tp->rx_opt.saw_tstamp &&
-                                   tp->rx_opt.rcv_tsecr && !tp->srtt)
-                                       tcp_ack_saw_tstamp(sk, 0);
+                               tcp_ack_update_rtt(sk, 0, 0);
 
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
index de7a194..143791d 100644 (file)
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
        if (p == &net->ipv6.devconf_dflt->forwarding)
                return 0;
 
-       if (!rtnl_trylock())
+       if (!rtnl_trylock()) {
+               /* Restore the original values before restarting */
+               *p = old;
                return restart_syscall();
+       }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
                __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -4028,12 +4031,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
+       loff_t pos = *ppos;
        int ret;
 
        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_fixup_forwarding(ctl, valp, val);
+       if (ret)
+               *ppos = pos;
        return ret;
 }
 
@@ -4075,8 +4081,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
        if (p == &net->ipv6.devconf_dflt->disable_ipv6)
                return 0;
 
-       if (!rtnl_trylock())
+       if (!rtnl_trylock()) {
+               /* Restore the original values before restarting */
+               *p = old;
                return restart_syscall();
+       }
 
        if (p == &net->ipv6.devconf_all->disable_ipv6) {
                __s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4104,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
+       loff_t pos = *ppos;
        int ret;
 
        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_disable_ipv6(ctl, valp, val);
+       if (ret)
+               *ppos = pos;
        return ret;
 }
 
index 2f2a5ca..002e6ee 100644 (file)
@@ -154,16 +154,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
        if (x->props.mode == XFRM_MODE_TUNNEL) {
                err = ipcomp6_tunnel_attach(x);
                if (err)
-                       goto error_tunnel;
+                       goto out;
        }
 
        err = 0;
 out:
        return err;
-error_tunnel:
-       ipcomp_destroy(x);
-
-       goto out;
 }
 
 static const struct xfrm_type ipcomp6_type =
index 480d7f8..8a7e0f5 100644 (file)
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
        if (t && !IS_ERR(t)) {
                struct ip6t_getinfo info;
                const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+               struct xt_table_info tmp;
+
                if (compat) {
-                       struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET6);
                        private = &tmp;
index 156020d..6b3602d 100644 (file)
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
 
       /* Query PPP channel and unit number */
     case PPPIOCGCHAN:
+      lock_kernel();
       if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
                                                (int __user *)argp))
        err = 0;
+      unlock_kernel();
       break;
     case PPPIOCGUNIT:
       lock_kernel();
       if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
                                                (int __user *)argp))
-      err = 0;
+        err = 0;
+      unlock_kernel();
       break;
 
       /* All these ioctls can be passed both directly and from ppp_generic,
index 76fa6fe..539f43b 100644 (file)
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
 
 static void __exit ipsec_pfkey_exit(void)
 {
-       unregister_pernet_subsys(&pfkey_net_ops);
        xfrm_unregister_km(&pfkeyv2_mgr);
        sock_unregister(PF_KEY);
+       unregister_pernet_subsys(&pfkey_net_ops);
        proto_unregister(&key_proto);
 }
 
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
        if (err != 0)
                goto out;
 
-       err = sock_register(&pfkey_family_ops);
+       err = register_pernet_subsys(&pfkey_net_ops);
        if (err != 0)
                goto out_unregister_key_proto;
+       err = sock_register(&pfkey_family_ops);
+       if (err != 0)
+               goto out_unregister_pernet;
        err = xfrm_register_km(&pfkeyv2_mgr);
        if (err != 0)
                goto out_sock_unregister;
-       err = register_pernet_subsys(&pfkey_net_ops);
-       if (err != 0)
-               goto out_xfrm_unregister_km;
 out:
        return err;
-out_xfrm_unregister_km:
-       xfrm_unregister_km(&pfkeyv2_mgr);
+
 out_sock_unregister:
        sock_unregister(PF_KEY);
+out_unregister_pernet:
+       unregister_pernet_subsys(&pfkey_net_ops);
 out_unregister_key_proto:
        proto_unregister(&key_proto);
        goto out;
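
After this reordering, ipsec_pfkey_exit() undoes registrations in exactly the reverse order of ipsec_pfkey_init(), and each unwind label in the init path releases only what had already succeeded. The general shape, as a self-contained sketch with two made-up registration steps:

static int do_register_a(void) { return 0; }	/* hypothetical step 1 */
static int do_register_b(void) { return 0; }	/* hypothetical step 2 */
static void do_unregister_a(void) { }

static int example_init(void)
{
	int err;

	err = do_register_a();
	if (err)
		goto out;
	err = do_register_b();
	if (err)
		goto out_unregister_a;
	return 0;

out_unregister_a:
	do_unregister_a();	/* undo only what succeeded, in reverse */
out:
	return err;
}
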
index 1f2db64..22f0c2a 100644 (file)
@@ -647,7 +647,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
        }
        if (pos[1] != 0 &&
            (pos[1] != ifibss->ssid_len ||
-            !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
+            memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
                /* Ignore ProbeReq for foreign SSID */
                return;
        }
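
The dropped '!' matters because memcmp() returns 0 when the buffers are equal: with the negation, the condition read "the SSIDs match", so probe requests for the node's own SSID were the ones being ignored. Writing the intent explicitly, as a small sketch:

#include <string.h>
#include <stdbool.h>

/* true if the request carries a different (foreign) SSID */
static bool foreign_ssid(const char *req, const char *ours, size_t len)
{
	return memcmp(req, ours, len) != 0;	/* memcmp() == 0 means equal */
}
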
index b9007f8..12a2bff 100644 (file)
@@ -245,6 +245,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
                info->control.rates[i].count = 1;
        }
 
+       if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+               return;
+
        if (sta && sdata->force_unicast_rateidx > -1) {
                info->control.rates[0].idx = sdata->force_unicast_rateidx;
        } else {
index f934c96..bc17cf7 100644 (file)
@@ -439,6 +439,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->scan_req)
                return -EBUSY;
 
+       if (req != local->int_scan_req &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !list_empty(&ifmgd->work_list)) {
+               /* actually wait for the work it's doing to finish/time out */
+               set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+               local->scan_req = req;
+               local->scan_sdata = sdata;
+               return 0;
+       }
+
        if (local->ops->hw_scan) {
                u8 *ies;
 
@@ -463,14 +473,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        local->scan_req = req;
        local->scan_sdata = sdata;
 
-       if (req != local->int_scan_req &&
-           sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !list_empty(&ifmgd->work_list)) {
-               /* actually wait for the work it's doing to finish/time out */
-               set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
-               return 0;
-       }
-
        if (local->ops->hw_scan)
                __set_bit(SCAN_HW_SCANNING, &local->scanning);
        else
index 0e98c32..4d79e3c 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>
 
 #include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 struct nf_conn nf_conntrack_untracked __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 
-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
 
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
        return ((u64)h * size) >> 32;
 }
 
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+                                      const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, nf_conntrack_htable_size,
+       return __hash_conntrack(tuple, net->ct.htable_size,
                                nf_conntrack_hash_rnd);
 }
 
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       unsigned int hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned int hash, repl_hash;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;
 
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       unsigned int hash = hash_conntrack(tuple);
+       unsigned int hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
        int dropped = 0;
 
        rcu_read_lock();
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < net->ct.htable_size; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                if (cnt >= NF_CT_EVICTION_RANGE)
                        break;
 
-               hash = (hash + 1) % nf_conntrack_htable_size;
+               hash = (hash + 1) % net->ct.htable_size;
        }
        rcu_read_unlock();
 
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 
        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-               unsigned int hash = hash_conntrack(orig);
+               unsigned int hash = hash_conntrack(net, orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
-       ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+       ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
        nf_ct_ext_destroy(ct);
        atomic_dec(&net->ct.count);
        nf_ct_ext_free(ct);
-       kmem_cache_free(nf_conntrack_cachep, ct);
+       kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        struct hlist_nulls_node *n;
 
        spin_lock_bh(&nf_conntrack_lock);
-       for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+       for (; *bucket < net->ct.htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
 
 static void nf_conntrack_cleanup_init_net(void)
 {
+       /* wait until all references to nf_conntrack_untracked are dropped */
+       while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+               schedule();
+
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
-       kmem_cache_destroy(nf_conntrack_cachep);
 }
 
 static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
                schedule();
                goto i_see_dead_people;
        }
-       /* wait until all references to nf_conntrack_untracked are dropped */
-       while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
-               schedule();
 
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            nf_conntrack_htable_size);
+                            net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
+       kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+       kfree(net->ct.slabname);
        free_percpu(net->ct.stat);
 }
 
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
-       int rnd;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
 
+       if (current->nsproxy->net_ns != &init_net)
+               return -EOPNOTSUPP;
+
        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hash)
                return -ENOMEM;
 
-       /* We have to rehahs for the new table anyway, so we also can
-        * use a newrandom seed */
-       get_random_bytes(&rnd, sizeof(rnd));
-
        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < init_net.ct.htable_size; i++) {
                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnnode);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+                       bucket = __hash_conntrack(&h->tuple, hashsize,
+                                                 nf_conntrack_hash_rnd);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
-       old_size = nf_conntrack_htable_size;
+       old_size = init_net.ct.htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
-       nf_conntrack_htable_size = hashsize;
+       init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
-       nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);
 
        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);
 
-       nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
-                                               sizeof(struct nf_conn),
-                                               0, SLAB_DESTROY_BY_RCU, NULL);
-       if (!nf_conntrack_cachep) {
-               printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-               ret = -ENOMEM;
-               goto err_cache;
-       }
-
        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
        if (ret < 0)
                goto err_helper;
 
+       /* Set up fake conntrack: to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+       nf_conntrack_untracked.ct_net = &init_net;
+#endif
+       atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+       /*  - and make it look like a confirmed connection */
+       set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
        return 0;
 
 err_helper:
        nf_conntrack_proto_fini();
 err_proto:
-       kmem_cache_destroy(nf_conntrack_cachep);
-err_cache:
        return ret;
 }
 
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
                ret = -ENOMEM;
                goto err_stat;
        }
-       net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+       if (!net->ct.slabname) {
+               ret = -ENOMEM;
+               goto err_slabname;
+       }
+
+       net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
+                                                       sizeof(struct nf_conn), 0,
+                                                       SLAB_DESTROY_BY_RCU, NULL);
+       if (!net->ct.nf_conntrack_cachep) {
+               printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+               ret = -ENOMEM;
+               goto err_cache;
+       }
+
+       net->ct.htable_size = nf_conntrack_htable_size;
+       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
                                             &net->ct.hash_vmalloc, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
        if (ret < 0)
                goto err_ecache;
 
-       /* Set up fake conntrack:
-           - to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
-       nf_conntrack_untracked.ct_net = &init_net;
-#endif
-       atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
-       /*  - and look it like as a confirmed connection */
-       set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
        return 0;
 
 err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            nf_conntrack_htable_size);
+                            net->ct.htable_size);
 err_hash:
+       kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+err_cache:
+       kfree(net->ct.slabname);
+err_slabname:
        free_percpu(net->ct.stat);
 err_stat:
        return ret;
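
nf_conntrack_init_net() above gives each network namespace its own conntrack slab cache; because every cache needs a distinct name, the patch derives one from the net pointer with kasprintf(). The allocate/undo pairing, reduced to a skeleton of the hunk itself:

static int example_net_init(struct net *net)
{
	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname)
		return -ENOMEM;

	net->ct.nf_conntrack_cachep =
		kmem_cache_create(net->ct.slabname, sizeof(struct nf_conn),
				  0, SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		kfree(net->ct.slabname);	/* undo in reverse order */
		return -ENOMEM;
	}
	return 0;
}
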
index fdf5d2a..2f25ff6 100644 (file)
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
 #endif /* CONFIG_PROC_FS */
 }
 
-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
 
 int nf_conntrack_expect_init(struct net *net)
 {
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
 
        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
-                       nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
+                       nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
index 65c2a7b..4b1a56b 100644 (file)
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
        /* Get rid of expecteds, set helpers to NULL. */
        hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
                unhelp(h, me);
-       for (i = 0; i < nf_conntrack_htable_size; i++) {
+       for (i = 0; i < net->ct.htable_size; i++) {
                hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                        unhelp(h, me);
        }
index 42f21c0..0ffe689 100644 (file)
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        last = (struct nf_conn *)cb->args[1];
-       for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
+       for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
 restart:
                hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
                                         hnnode) {
index 028aba6..e310f15 100644 (file)
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        struct hlist_nulls_node *n;
 
        for (st->bucket = 0;
-            st->bucket < nf_conntrack_htable_size;
+            st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(net->ct.hash[st->bucket].first);
                if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        head = rcu_dereference(head->next);
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
-                       if (++st->bucket >= nf_conntrack_htable_size)
+                       if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
        },
        {
                .procname       = "nf_conntrack_buckets",
-               .data           = &nf_conntrack_htable_size,
+               .data           = &init_net.ct.htable_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
                goto out_kmemdup;
 
        table[1].data = &net->ct.count;
+       table[2].data = &net->ct.htable_size;
        table[3].data = &net->ct.sysctl_checksum;
        table[4].data = &net->ct.sysctl_log_invalid;
 
index a4957bf..4c5972b 100644 (file)
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
+       else
+               err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        netlink_unlock_table();
 
+       if (err < 0)
+               goto out;
+
        err = __netlink_create(net, sock, cb_mutex, protocol);
        if (err < 0)
                goto out_module;
index 929218a..21f9c76 100644 (file)
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
          module.
 
          To compile this code as a module, choose M here: the
-         module will be called police.
+         module will be called act_police.
 
 config NET_ACT_GACT
         tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
          accepting packets.
 
          To compile this code as a module, choose M here: the
-         module will be called gact.
+         module will be called act_gact.
 
 config GACT_PROB
         bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
          other devices.
 
          To compile this code as a module, choose M here: the
-         module will be called mirred.
+         module will be called act_mirred.
 
 config NET_ACT_IPT
         tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
          classification.
 
          To compile this code as a module, choose M here: the
-         module will be called ipt.
+         module will be called act_ipt.
 
 config NET_ACT_NAT
         tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
          netfilter for NAT unless you know what you are doing.
 
          To compile this code as a module, choose M here: the
-         module will be called nat.
+         module will be called act_nat.
 
 config NET_ACT_PEDIT
         tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
          Say Y here if you want to mangle the content of packets.
 
          To compile this code as a module, choose M here: the
-         module will be called pedit.
+         module will be called act_pedit.
 
 config NET_ACT_SIMP
         tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
          If unsure, say N.
 
          To compile this code as a module, choose M here: the
-         module will be called simple.
+         module will be called act_simple.
 
 config NET_ACT_SKBEDIT
         tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
          If unsure, say N.
 
          To compile this code as a module, choose M here: the
-         module will be called skbedit.
+         module will be called act_skbedit.
 
 config NET_CLS_IND
        bool "Incoming device classification"
index 49278f8..9ac493f 100644 (file)
@@ -999,19 +999,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
        inode = rpc_get_inode(sb, S_IFDIR | 0755);
        if (!inode)
                return -ENOMEM;
-       root = d_alloc_root(inode);
+       sb->s_root = root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return -ENOMEM;
        }
        if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
-               goto out;
-       sb->s_root = root;
+               return -ENOMEM;
        return 0;
-out:
-       d_genocide(root);
-       dput(root);
-       return -ENOMEM;
 }
 
 static int
index b36cc34..f445ea1 100644 (file)
@@ -1102,7 +1102,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
        int err = -ENOMEM;
        struct xfrm_state *x = xfrm_state_alloc(net);
        if (!x)
-               goto error;
+               goto out;
 
        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1160,16 +1160,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
        return x;
 
  error:
+       xfrm_state_put(x);
+out:
        if (errp)
                *errp = err;
-       if (x) {
-               kfree(x->aalg);
-               kfree(x->ealg);
-               kfree(x->calg);
-               kfree(x->encap);
-               kfree(x->coaddr);
-       }
-       kfree(x);
        return NULL;
 }
 
index 529c9ca..8dffcb7 100644 (file)
@@ -387,7 +387,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
        struct smk_audit_info ad;
 
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-       smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint);
+       smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
        smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
 
        sbp = mnt->mnt_sb->s_security;
index 18369d4..455bc39 100644 (file)
@@ -89,29 +89,14 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
                sp = dentry->d_op->d_dname(dentry, newname + offset,
                                           newname_len - offset);
        } else {
-               /* Taken from d_namespace_path(). */
-               struct path root;
-               struct path ns_root = { };
-               struct path tmp;
-
-               read_lock(&current->fs->lock);
-               root = current->fs->root;
-               path_get(&root);
-               read_unlock(&current->fs->lock);
-               spin_lock(&vfsmount_lock);
-               if (root.mnt && root.mnt->mnt_ns)
-                       ns_root.mnt = mntget(root.mnt->mnt_ns->root);
-               if (ns_root.mnt)
-                       ns_root.dentry = dget(ns_root.mnt->mnt_root);
-               spin_unlock(&vfsmount_lock);
+               struct path ns_root = {.mnt = NULL, .dentry = NULL};
+
                spin_lock(&dcache_lock);
-               tmp = ns_root;
-               sp = __d_path(path, &tmp, newname, newname_len);
+               /* go to whatever namespace root we are under */
+               sp = __d_path(path, &ns_root, newname, newname_len);
                spin_unlock(&dcache_lock);
-               path_put(&root);
-               path_put(&ns_root);
                /* Prepend "/proc" prefix if using internal proc vfs mount. */
-               if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
+               if (!IS_ERR(sp) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
                    (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
                        sp -= 5;
                        if (sp >= newname)
index b8faa6d..ff6da6f 100644 (file)
@@ -1893,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
 
        if (!bdl_pos_adj[chip->dev_index])
                return 1; /* no delayed ack */
+       if (WARN_ONCE(!azx_dev->period_bytes,
+                     "hda-intel: zero azx_dev->period_bytes"))
+               return 0; /* this shouldn't happen! */
        if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
                return 0; /* NG - it's below the period boundary */
        return 1; /* OK, it's fine */
@@ -2347,7 +2350,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
  */
 static struct snd_pci_quirk msi_black_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
-       SND_PCI_QUIRK(0x1043, 0x829c, "ASUS", 0), /* nvidia */
+       SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
        {}
 };
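
The WARN_ONCE() in the first hunk of this file guards the very next statement, pos % azx_dev->period_bytes, which would be a divide-by-zero if the period had not been set up yet. The guard in isolation, as a sketch:

#include <linux/kernel.h>

static int position_ok(unsigned int pos, unsigned int period_bytes)
{
	/* warn once, then bail out rather than divide by zero */
	if (WARN_ONCE(!period_bytes, "zero period_bytes"))
		return 0;
	return pos % period_bytes <= period_bytes / 2;
}
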
 
index ddc584b..4b91d8c 100644 (file)
@@ -705,7 +705,7 @@ static void print_mapped_keys(void)
                fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
 
        fprintf(stdout,
-               "\t[K]     hide kernel_symbols symbols.             \t(%s)\n",
+               "\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
                hide_kernel_symbols ? "yes" : "no");
        fprintf(stdout,
                "\t[U]     hide user symbols.               \t(%s)\n",
index bb0fd6d..8a9e6ba 100644 (file)
@@ -295,10 +295,10 @@ void thread__find_addr_location(struct thread *self,
        al->thread = self;
        al->addr = addr;
 
-       if (cpumode & PERF_RECORD_MISC_KERNEL) {
+       if (cpumode == PERF_RECORD_MISC_KERNEL) {
                al->level = 'k';
                mg = &session->kmaps;
-       } else if (cpumode & PERF_RECORD_MISC_USER)
+       } else if (cpumode == PERF_RECORD_MISC_USER)
                al->level = '.';
        else {
                al->level = 'H';
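
The perf fix above switches '&' to '==' because the PERF_RECORD_MISC_* cpumode values form an enumeration packed into the low misc bits rather than independent flags; masking with '&' would, for instance, classify hypervisor samples as kernel ones. A tiny user-space illustration (the numeric values are only illustrative of that packing):

#include <stdio.h>

enum cpumode { MODE_UNKNOWN = 0, MODE_KERNEL = 1, MODE_USER = 2, MODE_HV = 3 };

int main(void)
{
	enum cpumode m = MODE_HV;

	printf("'&'  says kernel: %d\n", (m & MODE_KERNEL) != 0);	/* 1: wrong */
	printf("'==' says kernel: %d\n", m == MODE_KERNEL);		/* 0: right */
	return 0;
}
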
index 29465d4..fde17b0 100644 (file)
@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
        int ret;
 
        pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+       pp->found = 1;
        if (!buf)
                die("Failed to allocate memory by zalloc.");
        if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 error:
                free(pp->probes[0]);
                pp->probes[0] = NULL;
+               pp->found = 0;
        }
        return ret;
 }
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
        struct strlist *rawlist;
        struct str_node *ent;
 
+       memset(&pp, 0, sizeof(pp));
        fd = open_kprobe_events(O_RDONLY, 0);
        rawlist = get_trace_kprobe_event_rawlist(fd);
        close(fd);