There's no need to use the heavier (albeit safer)
*_irq[save|restore]() locking primitives within the driver's
interrupt handlers, as interrupts are guaranteed to be
non-reentrant there. Use the lightweight spin_lock() and
spin_unlock() primitives when acquiring the hardware_lock.
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
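For context, a minimal sketch of the locking rule this patch applies. The qla_example_* names and the struct below are hypothetical stand-ins, not the driver's actual types:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the per-HBA state guarded by hardware_lock. */
struct qla_example_hw {
	spinlock_t hardware_lock;
};

static irqreturn_t qla_example_intr_handler(int irq, void *dev_id)
{
	struct qla_example_hw *hw = dev_id;

	/*
	 * Hard-IRQ context: the handler cannot re-enter itself, so
	 * saving and restoring the interrupt flags buys nothing here;
	 * the lighter spin_lock()/spin_unlock() pair is sufficient.
	 */
	spin_lock(&hw->hardware_lock);
	/* ... read status, service the interrupt ... */
	spin_unlock(&hw->hardware_lock);

	return IRQ_HANDLED;
}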
scsi_qla_host_t *ha;
struct device_reg_2xxx __iomem *reg;
int status;
- unsigned long flags;
unsigned long iter;
uint16_t hccr;
uint16_t mb[4];
reg = &ha->iobase->isp;
status = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock(&ha->hardware_lock);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
if (hccr & HCCR_RISC_PAUSE) {
RD_REG_WORD(&reg->hccr);
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock(&ha->hardware_lock);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
scsi_qla_host_t *ha;
struct device_reg_2xxx __iomem *reg;
int status;
- unsigned long flags;
unsigned long iter;
uint32_t stat;
uint16_t hccr;
reg = &ha->iobase->isp;
status = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock(&ha->hardware_lock);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(&reg->hccr);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock(&ha->hardware_lock);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
scsi_qla_host_t *ha;
struct device_reg_24xx __iomem *reg;
int status;
- unsigned long flags;
unsigned long iter;
uint32_t stat;
uint32_t hccr;
reg = &ha->iobase->isp24;
status = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock(&ha->hardware_lock);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock(&ha->hardware_lock);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
{
scsi_qla_host_t *ha;
struct device_reg_24xx __iomem *reg;
- unsigned long flags;
ha = dev_id;
reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock(&ha->hardware_lock);
qla24xx_process_response_queue(ha);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock(&ha->hardware_lock);
scsi_qla_host_t *ha;
struct device_reg_24xx __iomem *reg;
int status;
- unsigned long flags;
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
reg = &ha->iobase->isp24;
status = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock(&ha->hardware_lock);
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock(&ha->hardware_lock);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
{
int ret;
device_reg_t __iomem *reg = ha->iobase;
- unsigned long flags;
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
clear_risc_ints:
ha->isp_ops->disable_intrs(ha);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
} else {
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irq(&ha->hardware_lock);
ha->isp_ops->enable_intrs(ha);
fail:
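Note the deliberate asymmetry: the clear_risc_ints path above runs in process context during initialization, where local interrupts are known to be enabled, so it takes the lock with spin_lock_irq()/spin_unlock_irq() rather than the save/restore variants. A sketch of that side of the pattern, using the same hypothetical names as above:

static void qla_example_clear_risc_ints(struct qla_example_hw *hw)
{
	/*
	 * Process context, interrupts known to be enabled: disable
	 * local interrupts for the critical section so the handler
	 * above cannot be entered on this CPU while the lock is held,
	 * then unconditionally re-enable them on unlock (no flags to
	 * save and restore).
	 */
	spin_lock_irq(&hw->hardware_lock);
	/* ... clear pending host/RISC interrupt bits ... */
	spin_unlock_irq(&hw->hardware_lock);
}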