* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/linux-2.6-kgdb:
kgdb: Fix kernel-doc format error in kgdb.h
blackfin,kgdb: Do not put PC in gdb_regs into retx.
blackfin,kgdb,probe_kernel: Cleanup probe_kernel_read/write
maccess,probe_kernel: Allow arch specific override probe_kernel_(read|write)
#define clk_fin_epll clk_ext_xtal_mux
#define clk_fout_mpll clk_mpll
+#define clk_fout_epll clk_epll
struct clk_sources {
unsigned int nr_sources;
.sources = &clk_src_apll,
};
-static struct clk clk_fout_epll = {
- .name = "fout_epll",
- .id = -1,
-};
-
static struct clk *clk_src_epll_list[] = {
[0] = &clk_fin_epll,
[1] = &clk_fout_epll,
&clk_iis_cd1,
&clk_pcm_cd,
&clk_mout_epll.clk,
- &clk_fout_epll,
&clk_mout_mpll.clk,
&clk_dout_mpll,
&clk_mmc0.clk,
clkp->name, ret);
}
}
-
- clk_mpll.parent = &clk_mout_mpll.clk;
- clk_epll.parent = &clk_mout_epll.clk;
}
extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
#define mcount _mcount
-#include <asm/kprobes.h>
/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
bundle_t bundle;
} kprobe_opcode_t;
-struct fnptr {
- unsigned long ip;
- unsigned long gp;
-};
-
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the instruction to be emulated */
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
* region register macros
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ &node_to_cpu_mask[node])
/*
* Returns the number of the node containing Node 'nid'.
*/
# ifdef __KERNEL__
+struct fnptr {
+ unsigned long ip;
+ unsigned long gp;
+};
+
/* DMA addresses are 64-bits wide, in general. */
typedef u64 dma_addr_t;
unsigned long psr;
int cpu = smp_processor_id();
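+ /* The per-CPU TR table is allocated lazily; nothing to re-insert if absent */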
+ if (!ia64_idtrs[cpu])
+ return;
+
psr = ia64_clear_ic();
for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
- p = &__per_cpu_idtrs[cpu][iord-1][i];
+ p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX + i;
if (p->pte & 0x1) {
old_rr = ia64_get_rr(p->ifa);
if (old_rr != p->rr) {
* if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (size > task_rlimit(task, RLIMIT_MEMLOCK))
return -ENOMEM;
/*
inline void
ia64_set_rbs_bot (void)
{
- unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
DEFINE_PER_CPU(u8, ia64_tr_num); /* Number of TR slots in current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
* Initializes the ia64_ctx.bitmap array based on max_ctx+1.
struct ia64_tr_entry *p;
int cpu = smp_processor_id();
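+ /*
+ * Lazily allocate this CPU's TR table on first use: the first
+ * IA64_TR_ALLOC_MAX entries track itrs, the next IA64_TR_ALLOC_MAX
+ * track dtrs. Zeroed so unused slots read as free (pte bit 0 clear),
+ * like the static __per_cpu_idtrs array this replaces.
+ */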
+ if (!ia64_idtrs[cpu]) {
+ ia64_idtrs[cpu] = kzalloc(2 * IA64_TR_ALLOC_MAX *
+ sizeof(struct ia64_tr_entry), GFP_KERNEL);
+ if (!ia64_idtrs[cpu])
+ return -ENOMEM;
+ }
r = -EINVAL;
/* Check overlap with existing TR entries */
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][0];
+ p = ia64_idtrs[cpu];
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
}
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][0];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
switch (target_mask & 0x3) {
case 1:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
goto found;
continue;
case 2:
- if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
case 3:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
- !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+ !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
default:
if (target_mask & 0x1) {
ia64_itr(0x1, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][0][i];
+ p = ia64_idtrs[cpu] + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
if (target_mask & 0x2) {
ia64_itr(0x2, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][1][i];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
return;
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][slot];
+ p = ia64_idtrs[cpu] + slot;
if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x1, p->ifa, p->itir>>2);
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][slot];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x2, p->ifa, p->itir>>2);
}
for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
- if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
- (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+ ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
break;
}
per_cpu(ia64_tr_used, cpu) = i;
unsigned long n)
{
int sz = __compiletime_object_size(to);
- int ret = -EFAULT;
if (likely(sz == -1 || sz >= n))
- ret = _copy_from_user(to, from, n);
+ n = _copy_from_user(to, from, n);
else
copy_from_user_overflow();
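+ /* on the overflow path n is unchanged, so all n bytes count as uncopied */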
- return ret;
+ return n;
}
long __must_check strncpy_from_user(char *dst, const char __user *src,
unsigned long n)
{
int sz = __compiletime_object_size(to);
- int ret = -EFAULT;
might_fault();
if (likely(sz == -1 || sz >= n))
- ret = _copy_from_user(to, from, n);
+ n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
else
WARN(1, "Buffer overflow detected!\n");
#endif
- return ret;
+ return n;
}
static __always_inline __must_check
cfg = irq_cfg(irq);
raw_spin_lock(&desc->lock);
+ /*
+ * Check if the irq migration is in progress. If so, we
+ * haven't received the cleanup request yet for this irq.
+ */
+ if (cfg->move_in_progress)
+ goto unlock;
+
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
{ 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
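+ /* early_res entries are {start, end}: this reserves [PAGE_SIZE, 2*PAGE_SIZE) */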
- { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+ { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
#endif
{}
u64 mmioh_base, mmioh_end;
int bus_base, bus_end;
+ /* Some systems don't have mmconf enabled, so config space above 0x100 is unreadable */
+ if (dev->cfg_size < 0x120)
+ return;
+
if (pci_root_num >= PCI_ROOT_NR) {
printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
return;
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
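+ /* sky2 needs a longer D3hot->D0 delay than the PCI_PM_D3_WAIT default */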
+ pdev->d3_delay = 150;
return 0;
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+unsigned int pci_pm_d3_delay;
+
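+/* Sleep for the device's D3 delay, but never less than the global minimum */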
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
}
dev->pm_cap = pm;
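+ /* default D3 delay; drivers may raise dev->d3_delay for slow hardware */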
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever;
+ u32 sever, mask;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
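+ /*
+ * Injection is pointless if every requested status bit is already
+ * masked by the device's AER mask registers.
+ */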
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask);
+ if (einj->cor_status && !(einj->cor_status & ~mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The correctable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask);
+ if (einj->uncor_status && !(einj->uncor_status & ~mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The uncorrectable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
rperr = __find_aer_error_by_dev(rpdev);
if (!rperr) {
rperr = rperr_alloc;
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev))
+ if (find_aer_device(rpdev, &edev)) {
+ if (!get_service_data(edev)) {
+ printk(KERN_WARNING "AER service is not initialized\n");
+ ret = -EINVAL;
+ goto out_put;
+ }
aer_irq(-1, edev);
- else
+ } else
ret = -EINVAL;
out_put:
*/
static void release_pcie_device(struct device *dev)
{
- kfree(to_pcie_device(dev));
+ kfree(to_pcie_device(dev));
}
/**
{
struct pcie_port_service_driver *service_driver;
- if ((dev->bus == &pcie_port_bus_type) &&
- (dev->driver)) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev));
- }
+ if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
+ service_driver = to_service_driver(dev->driver);
+ if (service_driver->suspend)
+ service_driver->suspend(to_pcie_device(dev));
+ }
return 0;
}
return driver_register(&new->driver);
}
+EXPORT_SYMBOL(pcie_port_service_register);
/**
* pcie_port_service_unregister - unregister PCI Express port service driver
{
driver_unregister(&drv->driver);
}
-
-EXPORT_SYMBOL(pcie_port_service_register);
EXPORT_SYMBOL(pcie_port_service_unregister);
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
*
- * If detected invokes the pcie_port_device_register() method for
+ * If detected invokes the pcie_port_device_register() method for
* this port device.
*
*/
(dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
return -ENODEV;
- if (!dev->irq && dev->pin) {
+ if (!dev->irq && dev->pin) {
dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
"check vendor BIOS\n", dev->vendor, dev->device);
}
return 0;
}
-static void pcie_portdrv_remove (struct pci_dev *dev)
+static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
pci_disable_device(dev);
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
- struct aer_broadcast_data result_data =
- {error, PCI_ERS_RESULT_CAN_RECOVER};
- int retval;
+ struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
+ int ret;
/* can not fail */
- retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter);
+ ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
- return result_data.result;
+ return data.result;
}
static int mmio_enabled_iter(struct device *device, void *data)
return retval;
}
-static void __exit pcie_portdrv_exit(void)
+static void __exit pcie_portdrv_exit(void)
{
pci_unregister_driver(&pcie_portdriver);
pcie_port_bus_unregister();
} else
printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
eventcode);
-
- kfree(obj);
}
static int __init hp_wmi_input_setup(void)
goto out;
new_dentry = dentry;
+ rehash = NULL;
new_inode = NULL;
}
}
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- int error;
unsigned int ia_valid;
+ int depth;
+ int error;
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
- reiserfs_write_lock(inode->i_sb);
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
journal_end(&th, inode->i_sb, jbegin_count);
}
}
- if (!error)
+ if (!error) {
+ /*
+ * Relax the lock here: inode_setattr() may truncate the inode
+ * pages and wait on their page locks, and the owner of such a
+ * page lock may need the reiserfs lock to release it.
+ */
+ reiserfs_write_unlock_once(inode->i_sb, depth);
error = inode_setattr(inode, attr);
+ depth = reiserfs_write_lock_once(inode->i_sb);
+ }
}
if (!error && reiserfs_posixacl(inode->i_sb)) {
}
out:
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
+
return error;
}
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
+ if (!is_owner_or_cap(inode)) {
err = -EPERM;
break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
break;
}
if (dentry->d_inode) {
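+ /* the unlink journals metadata, which requires the reiserfs lock */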
+ reiserfs_write_lock(inode->i_sb);
err = xattr_unlink(xadir->d_inode, dentry);
+ reiserfs_write_unlock(inode->i_sb);
update_ctime(inode);
}
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- if (!buffer)
- return lookup_and_delete_xattr(inode, name);
-
reiserfs_write_unlock(inode->i_sb);
+
+ if (!buffer) {
+ err = lookup_and_delete_xattr(inode, name);
+ reiserfs_write_lock(inode->i_sb);
+ return err;
+ }
+
dentry = xattr_lookup(inode, name, flags);
if (IS_ERR(dentry)) {
reiserfs_write_lock(inode->i_sb);
return PTR_ERR(dentry);
}
- down_read(&REISERFS_I(inode)->i_xattr_sem);
+ down_write(&REISERFS_I(inode)->i_xattr_sem);
reiserfs_write_lock(inode->i_sb);
.ia_size = buffer_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
+
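+ /* drop the reiserfs lock before i_mutex/i_alloc_sem to keep lock ordering */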
+ reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
down_write(&dentry->d_inode->i_alloc_sem);
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_setattr(dentry, &newattrs);
up_write(&dentry->d_inode->i_alloc_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
return 0;
}
+ reiserfs_write_unlock(inode->i_sb);
acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+ reiserfs_write_lock(inode->i_sb);
if (!acl)
return 0;
if (IS_ERR(acl))
), \
TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
"offset 0x%llx count %zd flags %s " \
- "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+ "startoff 0x%llx startblock %s blockcount 0x%llx", \
MAJOR(__entry->dev), MINOR(__entry->dev), \
__entry->ino, \
__entry->size, \
__entry->count, \
__print_flags(__entry->flags, "|", BMAPI_FLAGS), \
__entry->startoff, \
- __entry->startblock, \
+ xfs_fmtfsblock(__entry->startblock), \
__entry->blockcount) \
)
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
"prod %u minleft %u total %u alignment %u minalignslop %u " \
"len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
- "userdata %d firstblock 0x%llx", \
+ "userdata %d firstblock %s", \
MAJOR(__entry->dev), MINOR(__entry->dev), \
__entry->agno, \
__entry->agbno, \
__entry->wasfromfl, \
__entry->isfl, \
__entry->userdata, \
- __entry->firstblock) \
+ xfs_fmtfsblock(__entry->firstblock)) \
)
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int wakeup_prepared:1;
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
if (IS_ERR(p)) {
err = PTR_ERR(p);
- gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
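+ /*
+ * EACCES is a definitive answer for user space; transient kernel
+ * errors become -EAGAIN so that the upcall can be retried.
+ */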
+ switch (err) {
+ case -EACCES:
+ gss_msg->msg.errno = err;
+ err = mlen;
+ break;
+ case -EFAULT:
+ case -ENOMEM:
+ case -EINVAL:
+ case -ENOSYS:
+ gss_msg->msg.errno = -EAGAIN;
+ break;
+ default:
+ printk(KERN_CRIT "%s: bad return from "
+ "gss_fill_context: %zd\n", __func__, err);
+ BUG();
+ }
goto err_release_msg;
}
gss_msg->ctx = gss_get_ctx(ctx);
struct krb5_ctx *ctx;
int tmp;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
+ p = ERR_PTR(-ENOMEM);
goto out_err;
+ }
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
struct gss_ctx **ctx_id)
{
if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
- return GSS_S_FAILURE;
+ return -ENOMEM;
(*ctx_id)->mech_type = gss_mech_get(mech);
return mech->gm_ops