/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
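/*
 * Reading aid for the inline assembly above: the ipm/srl pair copies the
 * condition code out of the PSW into cc. After SIGA-w/wt the busy
 * indication is returned in general register 0, so shifting the low word
 * of __fc right by 31 extracts that single busy bit into *bb.
 */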
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
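/*
 * Callers treat the return value of qdio_check_ccq as follows: 0 means
 * the EQBS/SQBS instruction is finished, 1 means it has to be retried
 * (see the "goto again" loops in qdio_do_eqbs/qdio_do_sqbs below) and a
 * negative value is reported to the upper-layer handler as an
 * activate-check condition.
 */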
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
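/*
 * EQBS/SQBS above are the QEBSM operations for extracting and setting
 * SLSB buffer states by subchannel token rather than by touching the
 * SLSB in memory; the token was handed out at establish time (see the
 * sch_token checks). Non-QEBSM paths fall back to direct SLSB access in
 * get_buf_states()/set_buf_states() below.
 */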
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
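/*
 * Wrap-around example: with QDIO_MAX_BUFFERS_PER_Q == 128,
 * set_buf_states(q, 126, SLSB_P_INPUT_NOT_INIT, 4) advances via
 * next_buf() and changes buffers 126, 127, 0 and 1.
 */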
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:");
	DBF_DEV_HEX(DBF_INFO, q->irq_ptr, q, sizeof(void *));
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;

	if (q->u.out.use_enh_siga)
		fc = 3;
	if (!is_qebsm(q))
		schid = *((u32 *)&q->irq_ptr->schid);
	else {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}

static int qdio_siga_output(struct qdio_q *q)
{
	unsigned int busy_bit;
	u64 start_time = 0;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w bb:%2d", q->nr);

		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		DBF_ERROR("%4x SIGA-W:%2d", SCH_NO(q), cc);
	return cc;
}
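/*
 * The retry loop above busy-spins on cc=2 with the busy bit for at most
 * QDIO_BUSY_BIT_PATIENCE microseconds before giving up and reporting
 * QDIO_ERROR_SIGA_BUSY; the longer-term back-off is handled by
 * qdio_kick_outbound_q() with QDIO_BUSY_BIT_GIVE_UP (see the comment
 * block ahead of that function).
 */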
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
}
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->last_move_ftc = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->last_move_ftc = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
	}

	q->last_move_ftc = new;
	count--;
	if (!count)
		return;

	/*
	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
	 * we're losing initiative in the thinint code.
	 */
	set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
		       count);
}
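/*
 * Worked example for the non-QEBSM, not-yet-polling path: with
 * first_to_check == 10 and count == 4, buffer 10 is set to
 * SLSB_P_INPUT_ACK, buffers 11..13 are set to SLSB_P_INPUT_NOT_INIT and
 * last_move_ftc ends up at 13 (= new).
 */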
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga-sync here; the queues are synced by a PCI interrupt or
	 * by us after a thin interrupt.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
		return 1;
	} else
		return 0;
}
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state, 0);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
			      q->first_to_check);
		return 1;
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
			      q->first_to_check);
		return 0;
	}
}
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		q->last_move_ftc = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
/*
 * VM may present us cc=2 and the busy bit set on SIGA-write
 * during reconfiguration of their Guest LAN (only in iqdio mode,
 * otherwise qdio is asynchronous and cc=2 and busy bit there will take
 * the queues down immediately).
 *
 * Therefore qdio_siga_output will retry continuously for a short time
 * if such a condition occurs. If it doesn't change, it will
 * increase the busy_siga_counter, save the timestamp and
 * schedule the queue for later processing. qdio_outbound_processing
 * checks the counter; if non-zero, it calls qdio_kick_outbound_q
 * as often as the value of the counter, which attempts further SIGA
 * instructions. For each successful SIGA the counter is
 * decreased, for failing SIGAs it remains unchanged. After some
 * time of no movement, qdio_kick_outbound_q will
 * finally fail and reflect corresponding error codes to call
 * the upper layer module and have it take the queues down.
 *
 * Note that this is a change from the original HiperSockets design
 * (saying cc=2 and busy bit means take the queues down), but back
 * then Guest LAN didn't exist... excessive cc=2 with busy bit
 * conditions will still take the queues down, but the threshold is
 * higher due to the Guest LAN environment.
 *
 * Called from outbound tasklet and do_QDIO handler.
 */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickoutq:%1d", q->nr);

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* TODO: improve error handling for CC=0 case */
		if (q->u.out.timestamp)
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "cc2 rslv:%4x",
				      atomic_read(&q->u.out.busy_siga_counter));
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
	default:
		/* for plain cc=1, 2 or 3 */
		q->qdio_error = rc;
	}
}
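/*
 * Note the deliberate fall-through from the busy case into default:
 * once QDIO_BUSY_BIT_GIVE_UP has expired, the busy condition is no
 * longer retried but stored in q->qdio_error, so the next handler
 * invocation reports it to the upper layer as described in the comment
 * block above.
 */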
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;

	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
		tasklet_schedule(&q->tasklet);
		return;
	}

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	tasklet_schedule(&q->tasklet);
}
/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		DBF_ERROR("EQ:ck con");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ:no dev");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ: bad io");
		goto error;
	}
	return 0;
error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			return;
		case -ETIMEDOUT:
			DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_disable(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		tasklet_disable(&q->tasklet);
		del_timer(&q->u.out.timer);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
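/*
 * Typical life cycle of a qdio subchannel as driven by an upper-layer
 * driver (sketch, error handling omitted): qdio_allocate() and
 * qdio_establish() - or qdio_initialize() for both - followed by
 * qdio_activate(), then do_QDIO() for buffer processing, and finally
 * qdio_shutdown() and qdio_free() - or qdio_cleanup() for both.
 */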
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
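/*
 * Wrap-around example: with QDIO_MAX_BUFFERS_PER_Q == 128, start == 120
 * and count == 10, end = add_buf(120, 10) = 2, so buffers 120..127 and
 * 0..1 count as "in between": buf_in_between(125, 120, 10) and
 * buf_in_between(1, 120, 10) return 1, buf_in_between(5, 120, 10)
 * returns 0.
 */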
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	int used, rc, diff;

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update last_move_ftc */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->last_move_ftc);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				/* TODO: must we set last_move_ftc to something meaningful? */
				goto set;
			}
			q->last_move_ftc = add_buf(q->last_move_ftc, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}
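/*
 * QEBSM partial-overwrite example: with last_move_ftc == 20, bufnr == 18
 * and count == 4, diff = sub_buf(add_buf(18, 4), 20) = 2, so two ACKed
 * buffers were handed back; ack_count shrinks by 2 and last_move_ftc
 * advances to 22.
 */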
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			 (count > 1) &&
			 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
			/* exploit enhanced SIGA */
			q->u.out.use_enh_siga = 1;
			qdio_kick_outbound_q(q);
		} else {
			/*
			 * One siga-w per buffer required for unicast
			 * HiperSockets.
			 */
			q->u.out.use_enh_siga = 0;
			while (count--)
				qdio_kick_outbound_q(q);
		}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}
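/*
 * The ssqd_desc.mmwc value acts above as the per-SIGA buffer limit for
 * enhanced SIGA: if 1 < count <= mmwc, a single SIGA with fc=3 (see
 * qdio_do_siga_output) covers all buffers at once, otherwise one plain
 * siga-w is issued per buffer for unicast HiperSockets.
 */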
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
	else
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
			       count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
				count);
	else
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
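/*
 * Example call (sketch, assuming an upper-layer driver that already
 * established and activated the subchannel and owns "cdev"): hand one
 * emptied buffer of input queue 0 back to the adapter starting at
 * buffer 5:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 5, 1);
 */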
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf;
	return 0;

out_perf:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);