Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Feb 2010 03:34:56 +0000 (19:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Feb 2010 03:34:56 +0000 (19:34 -0800)
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  sfc: SFE4002/SFN4112F: Widen temperature and voltage tolerances
  sfc: Fix sign of efx_mcdi_poll_reboot() error in efx_mcdi_poll()
  net-sysfs: Use rtnl_trylock in wireless sysfs methods.
  net: Fix sysctl restarts...

40 files changed:
Documentation/kernel-parameters.txt
MAINTAINERS
arch/ia64/include/asm/acpi.h
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/smp.c
arch/sparc/kernel/kstack.h
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/tsb.S
arch/x86/kernel/acpi/boot.c
drivers/acpi/dock.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_pdc.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/vga/vgaarb.c
drivers/input/serio/i8042.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/platform/x86/thinkpad_acpi.c
include/linux/input.h

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d456..826b6e1 100644
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        acpi_display_output=video
                        See above.
 
+       acpi_early_pdc_eval     [HW,ACPI] Evaluate processor _PDC methods
+                               early. Needed on some platforms to properly
+                               initialize the EC.
+
        acpi_irq_balance [HW,ACPI]
                        ACPI will balance active IRQs
                        default in APIC mode
diff --git a/MAINTAINERS b/MAINTAINERS
index 412eff6..8ed3d0a 100644
@@ -1733,10 +1733,9 @@ F:       include/linux/tfrc.h
 F:     net/dccp/
 
 DECnet NETWORK LAYER
-M:     Christine Caulfield <christine.caulfield@googlemail.com>
 W:     http://linux-decnet.sourceforge.net
 L:     linux-decnet-user@lists.sourceforge.net
-S:     Maintained
+S:     Orphan
 F:     Documentation/networking/decnet.txt
 F:     net/decnet/
 
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 7ae5889..e97b255 100644
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0      /* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8..cc29c0f 100644
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
        }
 
        mpic = mpic_alloc(np, r.start,
-                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+                       MPIC_BROKEN_FRR_NIRQS,
                        0, 256, " OpenPIC  ");
        BUG_ON(mpic == NULL);
        of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4..a15f582 100644
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
        __iomem u32 *bptr_vaddr;
        struct device_node *np;
        int n = 0;
+       int ioremappable;
 
        WARN_ON (nr < 0 || nr >= NR_CPUS);
 
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
                return;
        }
 
+       /*
+        * A secondary core could be in a spinloop in the bootpage
+        * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+        * The bootpage and highmem can be accessed via ioremap(), but
+        * we need to directly access the spinloop if it's in lowmem.
+        */
+       ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
        /* Map the spin table */
-       bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       if (ioremappable)
+               bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       else
+               bptr_vaddr = phys_to_virt(*cpu_rel_addr);
 
        local_irq_save(flags);
 
        out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
        out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
+       if (!ioremappable)
+               flush_dcache_range((ulong)bptr_vaddr,
+                               (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
        /* Wait a bit for the CPU to ack. */
        while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
                mdelay(1);
 
        local_irq_restore(flags);
 
-       iounmap(bptr_vaddr);
+       if (ioremappable)
+               iounmap(bptr_vaddr);
 
        pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
 }
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 4248d96..5247283 100644
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
 {
        unsigned long base = (unsigned long) tp;
 
+       /* Stack pointer must be 16-byte aligned.  */
+       if (sp & (16UL - 1))
+               return false;
+
        if (sp >= (base + sizeof(struct thread_info)) &&
            sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
                return true;
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 4c26eb5..53a58b3 100644
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
 
 static int of_bus_ambapp_match(struct device_node *np)
 {
-       return !strcmp(np->name, "ambapp");
+       return !strcmp(np->type, "ambapp");
 }
 
 static void of_bus_ambapp_count_cells(struct device_node *child,
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 539e83f..592b03d 100644
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct pci_bus *bus, int devfn)
 {
        struct dev_archdata *sd;
+       struct pci_slot *slot;
        struct of_device *op;
        struct pci_dev *dev;
        const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        dev->dev.bus = &pci_bus_type;
        dev->devfn = devfn;
        dev->multifunction = 0;         /* maybe a lie? */
+       set_pcie_port_type(dev);
+
+       list_for_each_entry(slot, &dev->bus->slots, list)
+               if (PCI_SLOT(dev->devfn) == slot->number)
+                       dev->slot = slot;
 
        dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
        dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 
        dev->current_state = 4;         /* unknown power state */
        dev->error_state = pci_channel_io_normal;
+       dev->dma_mask = 0xffffffff;
 
        if (!strcmp(node->name, "pci")) {
                /* a PCI-PCI bridge */
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b..db15d12 100644
@@ -191,10 +191,12 @@ tsb_dtlb_load:
 
 tsb_itlb_load:
        /* Executable bit must be set.  */
-661:   andcc           %g5, _PAGE_EXEC_4U, %g0
-       .section        .sun4v_1insn_patch, "ax"
+661:   sethi           %hi(_PAGE_EXEC_4U), %g4
+       andcc           %g5, %g4, %g0
+       .section        .sun4v_2insn_patch, "ax"
        .word           661b
        andcc           %g5, _PAGE_EXEC_4V, %g0
+       nop
        .previous
 
        be,pn           %xcc, tsb_do_fault
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 0acbcdf..af1c583 100644
@@ -1344,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
         },
        {
         .callback = force_acpi_ht,
-        .ident = "ASUS P2B-DS",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
         .ident = "ASUS CUR-DLS",
         .matches = {
                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c13..b2586f5 100644
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
        struct platform_device *dd;
 
        id = dock_station_count;
+       memset(&ds, 0, sizeof(ds));
        dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
        if (IS_ERR(dd))
                return PTR_ERR(dd);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f..e88e8ae 100644
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
+       { set_max_cstate, "Pavilion zv5000", {
+         DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+         DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+        (void *)1},
+       { set_max_cstate, "Asus L8400B", {
+         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+         DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+        (void *)1},
        {},
 };
 
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819..e306ba9 100644
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
        return status;
 }
 
+static int early_pdc_done;
+
 void acpi_processor_set_pdc(acpi_handle handle)
 {
        struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
        if (arch_has_acpi_pdc() == false)
                return;
 
+       if (early_pdc_done)
+               return;
+
        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
        return 0;
 }
 
+static int param_early_pdc_optin(char *s)
+{
+       early_pdc_optin = 1;
+       return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
        {
        set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
+
+       early_pdc_done = 1;
 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f622..3e00967 100644
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 
        if (child)
                *child = device;
-       return 0;
+
+       if (device)
+               return 0;
+       else
+               return -ENODEV;
 }
 
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * Scan a given (probably recently hot-plugged) ACPI subtree and
+ * create and add, or start, the devices found there.
+ *
+ * If no devices were found, -ENODEV is returned.  That is not a real
+ * error; it simply means there were no suitable ACPI objects in the
+ * scanned subtree from which the kernel could create a device and
+ * add/start an appropriate driver.
+ */
+
 int
 acpi_bus_add(struct acpi_device **child,
             struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_add = 1;
 
-       acpi_bus_scan(handle, &ops, child);
-       return 0;
+       return acpi_bus_scan(handle, &ops, child);
 }
 EXPORT_SYMBOL(acpi_bus_add);
 
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
 {
        struct acpi_bus_ops ops;
 
+       if (!device)
+               return -EINVAL;
+
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_start = 1;
 
-       acpi_bus_scan(device->handle, &ops, NULL);
-       return 0;
+       return acpi_bus_scan(device->handle, &ops, NULL);
 }
 EXPORT_SYMBOL(acpi_bus_start);
 
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca..8a0ed28 100644
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
        unsigned long table_end;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05..ab6c973 100644
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
        return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+                           struct detailed_pixel_timing *pt)
+{
+       int i;
+       static const struct {
+               int w, h;
+       } cea_interlaced[] = {
+               { 1920, 1080 },
+               {  720,  480 },
+               { 1440,  480 },
+               { 2880,  480 },
+               {  720,  576 },
+               { 1440,  576 },
+               { 2880,  576 },
+       };
+       static const int n_sizes =
+               sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+       if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+               return;
+
+       for (i = 0; i < n_sizes; i++) {
+               if ((mode->hdisplay == cea_interlaced[i].w) &&
+                   (mode->vdisplay == cea_interlaced[i].h / 2)) {
+                       mode->vdisplay *= 2;
+                       mode->vsync_start *= 2;
+                       mode->vsync_end *= 2;
+                       mode->vtotal *= 2;
+                       mode->vtotal |= 1;
+               }
+       }
+
+       mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_set_name(mode);
 
-       if (pt->misc & DRM_EDID_PT_INTERLACED)
-               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       drm_mode_do_interlace_quirk(mode, pt);
 
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
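
A quick worked example of what the quirk above does.  A 1080i detailed
timing that a display encodes at field height arrives as 1920x540; the
quirk doubles the vertical values back to frame height and forces vtotal
odd.  The numbers below are illustrative only, not taken from a real EDID
block, and the snippet is a standalone sketch rather than driver code:

	#include <stdio.h>

	int main(void)
	{
		/* field-height encoding as some HDTV EDIDs report it */
		int vdisplay = 540, vsync_start = 542, vsync_end = 547, vtotal = 562;

		/* what drm_mode_do_interlace_quirk() does to such a mode */
		vdisplay *= 2;
		vsync_start *= 2;
		vsync_end *= 2;
		vtotal = vtotal * 2 | 1;

		/* prints: 1920x1080 interlaced: vsync 1084-1094, vtotal 1125 */
		printf("1920x%d interlaced: vsync %d-%d, vtotal %d\n",
		       vdisplay, vsync_start, vsync_end, vtotal);
		return 0;
	}
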
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b1d0acb..c2e8a45 100644
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
                },
        },
+       {
+               .ident = "Clevo M5x0N",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+                       DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+               },
+       },
        { }
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2cd0fad..0e9cd1d 100644
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvbios *bios = &dev_priv->VBIOS;
        struct init_exec iexec = { true, false };
-       unsigned long flags;
 
-       spin_lock_irqsave(&bios->lock, flags);
+       mutex_lock(&bios->lock);
        bios->display.output = dcbent;
        parse_init_table(bios, table, &iexec);
        bios->display.output = NULL;
-       spin_unlock_irqrestore(&bios->lock, flags);
+       mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
        struct nvbios *bios = &dev_priv->VBIOS;
 
        memset(bios, 0, sizeof(struct nvbios));
-       spin_lock_init(&bios->lock);
+       mutex_init(&bios->lock);
        bios->dev = dev;
 
        if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 68446fd..fd94bd6 100644
@@ -205,7 +205,7 @@ struct nvbios {
        struct drm_device *dev;
        struct nouveau_bios_info pub;
 
-       spinlock_t lock;
+       struct mutex lock;
 
        uint8_t data[NV_PROM_SIZE];
        unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c..21ac6e4 100644
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
                                nouveau_encoder(encoder)->restore.output);
 
        nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+       nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2a3df55..7f152f6 100644
@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
        uint8_t count = U8((*ptr)++);
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
-               schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+               udelay(count);
        else
                schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca..446b765 100644
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-       mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711..75bcf35 100644
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
 
        gb_tiling_config |= R600_BANK_SWAPS(1);
 
-       backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
-                                                       dev_priv->r600_max_backends,
-                                                       (0xff << dev_priv->r600_max_backends) & 0xff);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+               backend_map = 0x28;
+       else
+               backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+                                                               dev_priv->r600_max_backends,
+                                                               (0xff << dev_priv->r600_max_backends) & 0xff);
        gb_tiling_config |= R600_BACKEND_MAP(backend_map);
 
        cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480b..c0356bb 100644
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT                100000  /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE            16
 #define RADEON_DEBUGFS_MAX_NUM_FILES   32
 #define RADEONFB_CONN_LIMIT            4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
        struct list_head        list;
-       unsigned long           idx;
+       unsigned                idx;
        uint64_t                gpu_addr;
        struct radeon_fence     *fence;
-       uint32_t        *ptr;
+       uint32_t                *ptr;
        uint32_t                length_dw;
+       bool                    free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
        struct mutex            mutex;
        struct radeon_bo        *robj;
-       struct list_head        scheduled_ibs;
        struct radeon_ib        ibs[RADEON_IB_POOL_SIZE];
        bool                    ready;
-       DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+       unsigned                head_id;
 };
 
 struct radeon_cp {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2dcda61..4d88315 100644
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* Asrock RS600 board lists the DVI port as HDMI */
+       if ((dev->pdev->device == 0x7941) &&
+           (dev->pdev->subsystem_vendor == 0x1849) &&
+           (dev->pdev->subsystem_device == 0x7941)) {
+               if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       *connector_type = DRM_MODE_CONNECTOR_DVID;
+       }
+
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2381885..65f8194 100644
@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
                         * connected and the DVI port disconnected.  If the edid doesn't
                         * say HDMI, vice versa.
                         */
-                       if (radeon_connector->shared_ddc && connector_status_connected) {
+                       if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
                                struct drm_device *dev = connector->dev;
                                struct drm_connector *list_connector;
                                struct radeon_connector *list_radeon_connector;
@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                        return;
                }
                if (radeon_connector->ddc_bus && i2c_bus->valid) {
-                       if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
-                                   sizeof(struct radeon_i2c_bus_rec)) == 0) {
+                       if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
                                radeon_connector->shared_ddc = true;
                                shared_ddc = true;
                        }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148..e9d0850 100644
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                                                &p->validated);
                }
        }
-       return radeon_bo_list_validate(&p->validated, p->ib->fence);
+       return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
        unsigned i;
 
-       if (error && parser->ib) {
-               radeon_bo_list_unvalidate(&parser->validated,
-                                               parser->ib->fence);
-       } else {
-               radeon_bo_list_unreserve(&parser->validated);
+       if (!error && parser->ib) {
+               radeon_bo_list_fence(&parser->validated, parser->ib->fence);
        }
+       radeon_bo_list_unreserve(&parser->validated);
        for (i = 0; i < parser->nrelocs; i++) {
                if (parser->relocs[i].gobj) {
                        mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e137852..c57ad60 100644
  * 1.29- R500 3D cmd buffer support
  * 1.30- Add support for occlusion queries
  * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
  */
 #define DRIVER_MAJOR           1
-#define DRIVER_MINOR           31
+#define DRIVER_MINOR           32
 #define DRIVER_PATCHLEVEL      0
 
 enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71b..f1da370 100644
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
        }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       struct radeon_fence *old_fence = NULL;
        int r;
 
        r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
-               if (fence) {
-                       old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                       bo->tbo.sync_obj = radeon_fence_ref(fence);
-                       bo->tbo.sync_obj_arg = NULL;
-               }
-               if (old_fence) {
-                       radeon_fence_unref(&old_fence);
-               }
        }
        return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
        struct radeon_bo_list *lobj;
-       struct radeon_fence *old_fence;
-
-       if (fence)
-               list_for_each_entry(lobj, head, list) {
-                       old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                       if (old_fence == fence) {
-                               lobj->bo->tbo.sync_obj = NULL;
-                               radeon_fence_unref(&old_fence);
-                       }
+       struct radeon_bo *bo;
+       struct radeon_fence *old_fence = NULL;
+
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               spin_lock(&bo->tbo.lock);
+               old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+               bo->tbo.sync_obj = radeon_fence_ref(fence);
+               bo->tbo.sync_obj_arg = NULL;
+               spin_unlock(&bo->tbo.lock);
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
                }
-       radeon_bo_list_unreserve(head);
+       }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f180..7ab43de 100644
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d..6579eb4 100644
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
        struct radeon_fence *fence;
        struct radeon_ib *nib;
-       unsigned long i;
-       int r = 0;
+       int r = 0, i, c;
 
        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
-               DRM_ERROR("failed to create fence for new IB\n");
+               dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-       if (i < RADEON_IB_POOL_SIZE) {
-               set_bit(i, rdev->ib_pool.alloc_bm);
-               rdev->ib_pool.ibs[i].length_dw = 0;
-               *ib = &rdev->ib_pool.ibs[i];
-               mutex_unlock(&rdev->ib_pool.mutex);
-               goto out;
+       for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+               i &= (RADEON_IB_POOL_SIZE - 1);
+               if (rdev->ib_pool.ibs[i].free) {
+                       nib = &rdev->ib_pool.ibs[i];
+                       break;
+               }
        }
-       if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-               /* we go do nothings here */
+       if (nib == NULL) {
+               /* This should never happen: it means every IB has been
+                * handed out and none has been scheduled yet.  Return
+                * -EBUSY to userspace in the hope that a retry of the
+                * ioctl fares better.
+                */
+               dev_err(rdev->dev, "no free indirect buffer !\n");
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("all IB allocated none scheduled.\n");
-               r = -EINVAL;
-               goto out;
+               radeon_fence_unref(&fence);
+               return -EBUSY;
        }
-       /* get the first ib on the scheduled list */
-       nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-                        struct radeon_ib, list);
-       if (nib->fence == NULL) {
-               /* we go do nothings here */
+       rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+       nib->free = false;
+       if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-               r = -EINVAL;
-               goto out;
-       }
-       mutex_unlock(&rdev->ib_pool.mutex);
-
-       r = radeon_fence_wait(nib->fence, false);
-       if (r) {
-               DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-                         (unsigned long)nib->gpu_addr, nib->length_dw);
-               DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-               goto out;
+               r = radeon_fence_wait(nib->fence, false);
+               if (r) {
+                       dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+                               nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+                       mutex_lock(&rdev->ib_pool.mutex);
+                       nib->free = true;
+                       mutex_unlock(&rdev->ib_pool.mutex);
+                       radeon_fence_unref(&fence);
+                       return r;
+               }
+               mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
-
+       nib->fence = fence;
        nib->length_dw = 0;
-
-       /* scheduled list is accessed here */
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_del(&nib->list);
-       INIT_LIST_HEAD(&nib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
-
        *ib = nib;
-out:
-       if (r) {
-               radeon_fence_unref(&fence);
-       } else {
-               (*ib)->fence = fence;
-       }
-       return r;
+       return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        if (tmp == NULL) {
                return;
        }
-       mutex_lock(&rdev->ib_pool.mutex);
-       if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-               /* IB is scheduled & not signaled don't do anythings */
-               mutex_unlock(&rdev->ib_pool.mutex);
-               return;
-       }
-       list_del(&tmp->list);
-       INIT_LIST_HEAD(&tmp->list);
-       if (tmp->fence)
+       if (!tmp->fence->emited)
                radeon_fence_unref(&tmp->fence);
-
-       tmp->length_dw = 0;
-       clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+       mutex_lock(&rdev->ib_pool.mutex);
+       tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: Nothings in the ib we should report. */
-               DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+               DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }
 
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+       /* once scheduled, an IB is considered free and is protected by its fence */
+       ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
        if (rdev->ib_pool.robj)
                return 0;
        /* Allocate 1M object buffer */
-       INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
        r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
                                true, RADEON_GEM_DOMAIN_GTT,
                                &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
-               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+               rdev->ib_pool.ibs[i].free = true;
        }
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        if (rdev->ib_pool.robj) {
                r = radeon_bo_reserve(rdev->ib_pool.robj, false);
                if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
        if (ib == NULL) {
                return 0;
        }
-       seq_printf(m, "IB %04lu\n", ib->idx);
+       seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
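
The allocator above wraps its search index with "i &= (RADEON_IB_POOL_SIZE - 1)"
and advances head_id the same way, which is why the comment added to radeon.h
insists that RADEON_IB_POOL_SIZE stay a power of two.  A minimal sketch of that
wraparound (next_slot() and POOL_SIZE are illustrative helpers, not driver code):

	#define POOL_SIZE 16	/* must be a power of two */

	/* (head + 1) & (POOL_SIZE - 1) equals (head + 1) % POOL_SIZE only when
	 * POOL_SIZE is a power of two; e.g. slot 15 wraps to 0 and slot 3
	 * advances to 4, with no division on the fast path. */
	static unsigned int next_slot(unsigned int head)
	{
		return (head + 1) & (POOL_SIZE - 1);
	}
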
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5943d56..0302167 100644
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        gb_tiling_config |= BANK_SWAPS(1);
 
-       backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-                                                       rdev->config.rv770.max_backends,
-                                                       (0xff << rdev->config.rv770.max_backends) & 0xff);
+       if (rdev->family == CHIP_RV740)
+               backend_map = 0x28;
+       else
+               backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+                                                               rdev->config.rv770.max_backends,
+                                                               (0xff << rdev->config.rv770.max_backends) & 0xff);
        gb_tiling_config |= BACKEND_MAP(backend_map);
 
        cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af..3d47a2c 100644
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_state)
+                                         enum ttm_caching_state c_old,
+                                         enum ttm_caching_state c_new)
 {
        int ret = 0;
 
        if (PageHighMem(p))
                return 0;
 
-       if (get_page_memtype(p) != -1) {
+       if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
 
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
                        return ret;
        }
 
-       if (c_state == tt_wc)
+       if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
-       else if (c_state == tt_uncached)
+       else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);
 
        return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_state)
+                                         enum ttm_caching_state c_old,
+                                         enum ttm_caching_state c_new)
 {
        return 0;
 }
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
-                       ret = ttm_tt_set_page_caching(cur_page, c_state);
+                       ret = ttm_tt_set_page_caching(cur_page,
+                                                     ttm->caching_state,
+                                                     c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
@@ -268,7 +272,7 @@ out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
-                       (void)ttm_tt_set_page_caching(cur_page,
+                       (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f68..0c9c081 100644
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                 */
 
                DRM_INFO("It appears like vesafb is loaded. "
-                        "Ignore above error if any. Entering stealth mode.\n");
+                        "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-       } else {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       goto out_no_device;
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-               vmw_fb_init(dev_priv);
        }
+       ret = vmw_request_device(dev_priv);
+       if (unlikely(ret != 0))
+               goto out_no_device;
+       vmw_kms_init(dev_priv);
+       vmw_overlay_init(dev_priv);
+       vmw_fb_init(dev_priv);
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       if (!dev_priv->stealth) {
-               vmw_fb_close(dev_priv);
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
-               vmw_release_device(dev_priv);
-               pci_release_regions(dev->pdev);
-       } else {
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
+       vmw_fb_close(dev_priv);
+       vmw_kms_close(dev_priv);
+       vmw_overlay_close(dev_priv);
+       vmw_release_device(dev_priv);
+       if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
-       }
+       else
+               pci_release_regions(dev->pdev);
+
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
        int ret = 0;
 
        DRM_INFO("Master set.\n");
-       if (dev_priv->stealth) {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
 
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-       if (dev_priv->stealth) {
-               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-               if (unlikely(ret != 0))
-                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
-               vmw_release_device(dev_priv);
-       }
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       if (!dev_priv->stealth)
-               vmw_fb_on(dev_priv);
+       vmw_fb_on(dev_priv);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f643..a933670 100644
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        info->pixmap.scan_align = 1;
 #endif
 
+       info->aperture_base = vmw_priv->vram_start;
+       info->aperture_size = vmw_priv->vram_size;
+
        /*
         * Dirty & Deferred IO
         */
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 24b56dc..2f6cf69 100644
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                remaining -= 7;
                pr_devel("client 0x%p called 'target'\n", priv);
                /* if target is default */
-               if (!strncmp(kbuf, "default", 7))
+               if (!strncmp(curr_pos, "default", 7))
                        pdev = pci_dev_get(vga_default_device());
                else {
                        if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e..b54aee7 100644
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
        return 0;
 }
 
+static int i8042_pm_thaw(struct device *dev)
+{
+       i8042_interrupt(0, NULL);
+
+       return 0;
+}
+
 static const struct dev_pm_ops i8042_pm_ops = {
        .suspend        = i8042_pm_reset,
        .resume         = i8042_pm_restore,
+       .thaw           = i8042_pm_thaw,
        .poweroff       = i8042_pm_reset,
        .restore        = i8042_pm_restore,
 };
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fd..cb2fd01 100644
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
                        -ret_val);
                goto acpiphp_bus_add_out;
        }
-       /*
-        * try to start anyway.  We could have failed to add
-        * simply because this bus had previously been added
-        * on another add.  Don't bother with the return value
-        * we just keep going.
-        */
        ret_val = acpi_bus_start(device);
 
 acpiphp_bus_add_out:
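
With acpi_bus_add() and acpi_bus_start() now propagating errors (see the
drivers/acpi/scan.c hunk above), callers such as acpiphp no longer need the
removed "try to start anyway" comment.  A minimal, hypothetical sketch of the
new contract; example_slot_rescan() and its NULL parent are assumptions for
illustration, not part of this patch:

	static int example_slot_rescan(acpi_handle handle)
	{
		struct acpi_device *device = NULL;
		int ret;

		ret = acpi_bus_add(&device, NULL, handle, ACPI_BUS_TYPE_DEVICE);
		if (ret == -ENODEV)
			return 0;		/* nothing to enumerate: not an error */
		if (ret)
			return ret;		/* genuine failure */

		return acpi_bus_start(device);	/* device is non-NULL on success */
	}
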
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e67e4fe..eb603f1 100644
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
                sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-                                  &thermal_temp_input16_group);
+                                  &thermal_temp_input8_group);
                break;
        case TPACPI_THERMAL_NONE:
        default:
diff --git a/include/linux/input.h b/include/linux/input.h
index 735ceaf..663208a 100644
@@ -376,6 +376,7 @@ struct input_absinfo {
 #define KEY_DISPLAY_OFF                245     /* display device to off state */
 
 #define KEY_WIMAX              246
+#define KEY_RFKILL             247     /* Key that controls all radios */
 
 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */