/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
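
/*
 * Overview of the SIGA (signal adapter) helpers below: the function code
 * is passed in GR0 and the subchannel id (or, with QEBSM, the subchannel
 * token) in GR1; the queue masks follow in GR2/GR3. Here fc 2 requests
 * SIGA-sync and fc 1 SIGA-read, while do_siga_output() receives its
 * function code from the caller (see qdio_siga_output()).
 */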
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* SIGA-w/wt returns the busy bit in the function-code register */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}

	return count - tmp_count;
}
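
/*
 * The helpers below operate on the SLSB, a ring of QDIO_MAX_BUFFERS_PER_Q
 * (128) state bytes shared with the adapter, one byte per SBAL. Without
 * QEBSM the states live in q->slsb and are accessed directly; with QEBSM
 * the EQBS/SQBS wrappers above go through the subchannel token instead.
 */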
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	/* mask ~0U selects all output queues of the subchannel */
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	/* sync all input and all output queues */
	return qdio_siga_sync(q, ~0U, ~0U);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	else
		schid = *((u32 *)&q->irq_ptr->schid);

again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_usecs();
			goto again;
		}
		if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
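
/*
 * A set busy bit means SIGA-w/wt could not access the HiperSockets target
 * buffer. qdio_siga_output() retries for up to QDIO_BUSY_BIT_PATIENCE
 * microseconds; a queue that is still busy afterwards is reported as
 * QDIO_ERROR_SIGA_BUSY by qdio_kick_outbound_q() below.
 */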
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
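
/*
 * Note on the ACK scheme above: leaving one buffer in SLSB_P_INPUT_ACK
 * signals the adapter that the driver keeps polling this queue, so no
 * further interrupt is needed. qdio_stop_polling() or the next inbound
 * run removes the ACK again. With QEBSM the EQBS instruction already
 * acknowledged the buffers, so only the book-keeping is updated.
 */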
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_usecs();
		return 1;
	} else
		return 0;
}
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		tasklet_schedule(&q->tasklet);
		return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		tasklet_schedule(&q->tasklet);
	}
}
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
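
/*
 * Outbound SLSB life cycle: do_QDIO() marks filled buffers as
 * SLSB_CU_OUTPUT_PRIMED, the adapter fetches them and flips the state to
 * SLSB_P_OUTPUT_EMPTY, and the frontier scan above reclaims those buffers
 * and reports them to the upper-layer driver via the output handler.
 */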
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how and
 * qdio_free(). The qdio_free() return value is ignored since !irq_ptr
 * is already checked.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);

	qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
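
/*
 * Illustrative sketch (disabled, not part of the driver): how an
 * upper-layer driver might fill struct qdio_initialize before calling
 * qdio_initialize(). The my_* names are hypothetical; the fields and the
 * handler signature follow qdio_allocate() and qdio_kick_handler() above.
 */
#if 0
static void my_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
			     int queue, int first, int count,
			     unsigned long int_parm)
{
	/* process SBALs first..first+count-1 of input queue 'queue' */
}

static int my_setup(struct ccw_device *cdev, void **in_sbals, void **out_sbals)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= my_input_handler,
		.output_handler		= my_output_handler,	/* analogous */
		.int_parm		= (unsigned long)my_driver_data,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};

	return qdio_initialize(&init_data);
}
#endif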
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
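
/*
 * Example: with start = 120 and count = 16 on the 128-buffer ring,
 * end = add_buf(120, 16) = 8, so the wrap-around branch above accepts
 * bufnr 120..127 as well as bufnr 0..7.
 */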
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags from the program
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags from the program
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	}
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else
		qperf_inc(q, fast_requeue);

out:
	tasklet_schedule(&q->tasklet);
	return rc;
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
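
/*
 * Illustrative sketch (disabled, not part of the driver): typical
 * do_QDIO() calls as issued by an upper-layer driver. The my_* names
 * and the queue/buffer numbers are hypothetical.
 */
#if 0
static int my_xmit(struct ccw_device *cdev, unsigned int bufnr)
{
	/* hand one filled SBAL on output queue 0 to the adapter */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
}

static int my_requeue(struct ccw_device *cdev, unsigned int bufnr)
{
	/* return one processed SBAL on input queue 0 for reuse */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
}
#endif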
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_debug;
	return 0;

out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);