/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
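
/*
 * The do_siga_* helpers below wrap the SIGA (signal adapter) instruction:
 * the function code and the operands are passed in registers 0-3 and the
 * instruction's condition code is returned to the caller.
 */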

static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
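
/*
 * After SIGA-w/wt the leftmost bit of register 0 indicates that the
 * adapter could not access a buffer (busy), which is why do_siga_output
 * shifts __fc right by 31 to hand that single bit back through *bb.
 */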

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
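
/*
 * qdio_check_ccq() maps the EQBS/SQBS condition code qualifiers: 0 and 32
 * mean all buffers were processed (or the next buffer state differs), 96
 * and 97 mean the instruction must be reissued for the remaining buffers,
 * and anything else is treated as a fatal error (-EIO).
 */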

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
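
/*
 * Example for the non-QEBSM scan above: with count = 3 and slsb states
 * PRIMED, PRIMED, EMPTY starting at bufnr, the loop stops at the EMPTY
 * buffer and returns 2 with *state set to SLSB_P_INPUT_PRIMED.
 */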

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

/* a mask of ~0U selects all queues of the respective direction */
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	else
		schid = *((u32 *)&q->irq_ptr->schid);

again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_usecs();
			goto again;
		}
		if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
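
/*
 * The busy-bit loop above retries the SIGA for up to
 * QDIO_BUSY_BIT_PATIENCE microseconds (start_time is taken on the first
 * busy indication) before giving up and returning cc to the caller.
 */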

static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;

	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
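
/*
 * Summary of the ACK bookkeeping above: under QEBSM the whole primed range
 * is acknowledged by EQBS and ack_start/ack_count track it; without QEBSM
 * only the newest buffer is set to SLSB_P_INPUT_ACK while the others are
 * reset to NOT_INIT so that further primed buffers raise interrupts.
 */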

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga-sync needed here: a PCI interrupt or the thin-interrupt
	 * handler has already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
			q->u.in.timestamp = get_usecs();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qdio_perf_stat_inc(&perf_stats.inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
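
/*
 * For queues without PCI support the timer armed above (10 * HZ) makes
 * sure a PRIMED->EMPTY transition that happens without an interrupt is
 * eventually noticed and the outbound handler is called.
 */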

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how,
 * followed by qdio_free(). The qdio_free() return value is ignored
 * since !irq_ptr is already checked.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
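
/*
 * Typical life cycle for an upper-layer driver (sketch): qdio_allocate()
 * and qdio_establish() - or qdio_initialize() for both - followed by
 * qdio_activate(); buffers are then exchanged via do_QDIO(). Teardown
 * goes through qdio_shutdown() and qdio_free(), or qdio_cleanup() for both.
 */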

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
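
/*
 * Wrap-around example: with QDIO_MAX_BUFFERS_PER_Q == 128, start = 120
 * and count = 16 give end = add_buf(120, 16) = 8, so buffers 120..127
 * and 0..7 count as "in between".
 */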

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
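
/*
 * Example for the partial-overwrite path above (QEBSM): with
 * ack_start = 10, ack_count = 4 and the caller returning buffers 8..11
 * (bufnr = 8, count = 4), diff becomes sub_buf(add_buf(8, 4), 10) = 2,
 * leaving ack_count = 2 and moving ack_start to 12.
 */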

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else
		qdio_perf_stat_inc(&perf_stats.fast_requeue);

out:
	tasklet_schedule(&q->tasklet);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
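
/*
 * Illustrative use of do_QDIO() (variable names are made up): after
 * emptying n inbound buffers of queue 0 starting at buffer 0, a driver
 * returns them to the adapter with
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, n);
 *
 * and hands over n filled outbound buffers starting at bufnr with
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, n);
 */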

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf;
	return 0;

out_perf:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);