#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
+#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
int rc;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+ qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
* buffers later.
*/
if ((ccq == 96) && (count != tmp_count)) {
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
+ qperf_inc(q, eqbs_partial);
return (count - tmp_count);
}
return 0;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+ qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+ qperf_inc(q, sqbs_partial);
goto again;
}
if (rc < 0) {
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_sync);
+ qperf_inc(q, siga_sync);
cc = do_siga_sync(q->irq_ptr->schid, output, input);
if (cc)
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_in);
+ qperf_inc(q, siga_read);
cc = do_siga_input(q->irq_ptr->schid, q->mask);
if (cc)
return;
q->u.in.polling = 0;
- qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+ qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
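+/*
+ * Per-queue SBAL usage statistics: add to the running total and put the
+ * count into a histogram bucket keyed by log2(count); a completely full
+ * queue (count == QDIO_MAX_BUFFERS_MASK) goes into the last bucket.
+ */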
+static inline void account_sbals(struct qdio_q *q, int count)
+{
+ int pos = 0;
+
+ q->q_stats.nr_sbal_total += count;
+ if (count == QDIO_MAX_BUFFERS_MASK) {
+ q->q_stats.nr_sbals[7]++;
+ return;
+ }
+ while (count >>= 1)
+ pos++;
+ q->q_stats.nr_sbals[pos]++;
+}
+
static void announce_buffer_error(struct qdio_q *q, int count)
{
q->qdio_error |= QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
(q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
- qdio_perf_stat_inc(&perf_stats.outbound_target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+ qperf_inc(q, target_full);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
return;
}
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
+ /* need to change ALL buffers to get more interrupts */
+ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
+ /*
+ * No siga-sync needed here: the queues were already synced either by a
+ * PCI interrupt or by us after a thin interrupt.
+ */
count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * qdio_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
- goto check_next;
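+ /* atomic_sub() yields the new value on s390; 0 used buffers left means the input queue ran full */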
+ if (atomic_sub(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
+ if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_usecs();
return 1;
} else
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
- if (state == SLSB_P_INPUT_PRIMED)
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
} else
count = sub_buf(end, start);
if (q->is_input_q) {
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
+ qperf_inc(q, inbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
} else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
}
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
static void __qdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+ qperf_inc(q, tasklet_inbound);
again:
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
+ qperf_inc(q, tasklet_inbound_resched);
goto again;
+ }
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
goto again;
+ }
}
void qdio_inbound_processing(unsigned long data)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
return q->first_to_check;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_out);
+ qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
switch (cc) {
static void __qdio_outbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+ qperf_inc(q, tasklet_outbound);
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
- else {
- if (!timer_pending(&q->u.out.timer)) {
+ else
+ if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
- }
- }
return;
sched:
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qperf_inc(q, tasklet_inbound);
qdio_sync_after_thinint(q);
/*
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
+ return;
+ }
}
qdio_stop_polling(q);
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- qdio_perf_stat_inc(&perf_stats.pci_int);
-
for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- qdio_perf_stat_inc(&perf_stats.qdio_int);
-
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
break;
+ case QDIO_IRQ_STATE_STOPPED:
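+ /* interrupts may still arrive while the device is being shut down, silently ignore them */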
+ break;
default:
WARN_ON(1);
}
{
int used, diff;
+ qperf_inc(q, inbound_call);
+
if (!q->u.in.polling)
goto set;
unsigned char state;
int used, rc = 0;
- qdio_perf_stat_inc(&perf_stats.outbound_handler);
+ qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
- if (callflags & QDIO_FLAG_PCI_OUT)
+ if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
+ qperf_inc(q, pci_request_int);
+ }
else
q->u.out.pci_out_enabled = 0;
get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
rc = qdio_kick_outbound_q(q);
- else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
- qdio_perf_stat_inc(&perf_stats.fast_requeue);
- }
+ else
+ qperf_inc(q, fast_requeue);
+
out:
tasklet_schedule(&q->tasklet);
return rc;
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
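+ /* bufnr is a 0-based index, count may span the whole queue */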
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (callflags & QDIO_FLAG_SYNC_INPUT)
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
- else
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+ "do%02x b:%02x c:%02x", callflags, bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
rc = qdio_debug_init();
if (rc)
goto out_ti;
- rc = qdio_setup_perf_stats();
- if (rc)
- goto out_debug;
rc = tiqdio_register_thinints();
if (rc)
- goto out_perf;
+ goto out_debug;
return 0;
-out_perf:
- qdio_remove_perf_stats();
out_debug:
qdio_debug_exit();
out_ti:
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_remove_perf_stats();
qdio_debug_exit();
qdio_setup_exit();
}