// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
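
/*
 * The helpers below wrap the SIGA (Signal Adapter) instruction, which asks
 * the adapter to read (SIGA-r), write (SIGA-w/wt) or synchronize (SIGA-s)
 * queue buffers. As the register bindings show, the function code travels in
 * general register 0, the subchannel id (or QEBSM subchannel token) in
 * general register 1 and the queue masks in registers 2 and 3; the condition
 * code is the return value.
 */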

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}
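
/*
 * Note on the busy bit: after SIGA-w/wt the adapter reports a busy condition
 * in general register 0, and the "__fc >> 31" shift above extracts it into
 * *bb so that callers can distinguish "queue busy, retry later" from a real
 * error condition code.
 */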

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_kick, count, q->irq_ptr->int_parm);
		return 0;
	}
}
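
/*
 * EQBS and SQBS are the QEBSM (QDIO Enhanced Buffer State Management)
 * instructions used when the queues are managed by millicode. The ccq values
 * handled above and below (0/32 all done, 96 partially done, 97 retry) are
 * the instruction's condition-code qualifiers.
 */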

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_kick, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* merge PENDING into EMPTY: */
		if (merge_pending &&
		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
		    __state == SLSB_P_OUTPUT_EMPTY)
			continue;

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
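
/*
 * The SLSB (storage list state block) holds one state byte per buffer. The
 * byte encodes both the current owner (program or control unit) and the
 * processing state, which is why the SLSB_OWNER_CU test above can bail out
 * without scanning any further: adapter-owned buffers mean no work for us.
 */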

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (aob) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
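
/*
 * On some configurations SLSB updates are not visible to the adapter (or to
 * the program) without an explicit SIGA-s; need_siga_sync() reports whether
 * this applies, so the callers below only pay for the instruction when the
 * hardware actually requires it.
 */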

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	int pos;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	pos = ilog2(count);
	q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, start, state, count);
}

static inline void inbound_primed(struct qdio_q *q, unsigned int start,
				  int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = start;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = start;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(start, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
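
/*
 * Note on the ACK scheme above: the driver keeps one acknowledged buffer
 * (or, with QEBSM, one acknowledged run of buffers) per input queue. While
 * an ACK is outstanding the adapter can avoid raising further interrupts
 * for buffers the program has already seen; qdio_stop_polling() resets the
 * ACK state once the polling loop ends.
 */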

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	if (!count)
		return 0;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, start, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, start, count);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_inbound_buffer_frontier(q, start);

	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
		q->u.in.timestamp = get_tod_clock();

	return count;
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
		return 1;
	} else
		return 0;
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
						int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

	q->sbal_state[bufnr].flags = 0;
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
	int start = q->first_to_kick;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = add_buf(start, count);
	q->qdio_error = 0;
}
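
/*
 * A note on the two queue cursors: first_to_check marks how far the SLSB
 * has been scanned, first_to_kick how far completions have been reported
 * to the upper-layer handler. The delta between them is what still has to
 * be delivered (see qdio_handle_activate_check() further down).
 */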

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->tasklet);
		return 0;
	}
	return -EPERM;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	start = add_buf(start, count);
	q->first_to_check = start;
	qdio_kick_handler(q, count);

	if (!qdio_inbound_q_done(q, start)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
	case SLSB_P_OUTPUT_PENDING:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_outbound_buffer_frontier(q, start);

	if (count)
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);

	return count;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
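
/*
 * Condition code 2 from SIGA-w is ambiguous: with the busy bit set the
 * subchannel is temporarily busy, so the kick above retries with a bounded
 * mdelay() backoff before giving up with -EBUSY; without it the target is
 * out of buffers (-ENOBUFS). Both are recoverable, unlike the -EIO cases.
 */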

static void __qdio_outbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = qdio_outbound_q_moved(q, start);
	if (count) {
		q->first_to_check = add_buf(start, count);
		qdio_kick_handler(q, count);
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq))
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/* The interrupt could be caused by a PCI request: */
	qdio_check_outbound_pci_queues(q->irq_ptr);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	start = add_buf(start, count);
	q->first_to_check = start;
	qdio_kick_handler(q, count);

	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!pci_out_supported(irq_ptr))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}
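
/*
 * "PCI" in the handler above is QDIO's program-controlled interrupt, not
 * the PCI bus: the adapter raises it to signal queue progress, so the
 * handler merely reschedules the affected queue tasklets (or defers to the
 * driver's queue_start_poll callback when interrupt avoidance is active).
 */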

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irq(get_ccwdev_lock(cdev));

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler) {
		cdev->handler = irq_ptr->orig_handler;
		cdev->private->intparm = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;

	ccw_device_get_schid(init_data->cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(init_data, irq_ptr))
		goto out_rel;

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (multicast_outbound(q))
				continue;
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct ccw_device *cdev = init_data->cdev;
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
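
/*
 * Typical driver lifecycle (an illustrative sketch only; error handling and
 * the remaining qdio_initialize fields, such as the SBAL address arrays, are
 * elided, and the local names are made up):
 *
 *	struct qdio_initialize init = {
 *		.cdev		= cdev,
 *		.no_input_qs	= 1,
 *		.no_output_qs	= 1,
 *		.input_handler	= my_in_handler,
 *		.output_handler	= my_out_handler,
 *	};
 *
 *	qdio_allocate(&init);
 *	qdio_establish(&init);
 *	qdio_activate(cdev);
 *	...				// do_QDIO() traffic
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */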

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		if (q->u.out.use_cq)
			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, 0);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);

	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
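
/*
 * Illustrative do_QDIO() calls (a sketch, not taken from this file):
 * returning one emptied buffer to input queue 0, and submitting one filled
 * buffer on output queue 0 with a program-controlled interrupt requested:
 *
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT, 0, bufnr, 1);
 */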

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q, q->first_to_check))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int start;
	int count;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	start = q->first_to_check;

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return 0;

	start = add_buf(start, count);
	q->first_to_check = start;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	*bufnr = q->first_to_kick;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = add_buf(q->first_to_kick, count);
	q->qdio_error = 0;

	return count;
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
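
/*
 * Polling-mode sketch (illustrative; the loop shape and the hypothetical
 * consume() helper are made up): a driver that registered queue_start_poll
 * typically drains the queue with qdio_get_next_buffers() and re-arms
 * interrupts afterwards:
 *
 *	int bufnr, error, n;
 *
 *	while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
 *		consume(bufnr, n, error);
 *	if (qdio_start_irq(cdev, 0) == 1)
 *		goto poll_again;	// new data raced in, keep polling
 */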

/**
 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
 * @schid: Subchannel ID.
 * @cnc: Boolean Change-Notification Control
 * @response: Response code will be stored at this address
 * @cb: Callback function will be executed for each element
 *	of the address list
 * @priv: Pointer to pass to the callback function.
 *
 * Performs "Store-network-bridging-information list" operation and calls
 * the callback function for every entry in the list. If "change-
 * notification-control" is set, further changes in the address list
 * will be reported via the IPA command.
 */
int qdio_pnso_brinfo(struct subchannel_id schid,
		int cnc, u16 *response,
		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
				void *entry),
		void *priv)
{
	struct chsc_pnso_area *rr;
	int rc;
	u32 prev_instance = 0;
	int isfirstblock = 1;
	int i, size, elems;

	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (rr == NULL)
		return -ENOMEM;
	do {
		/* on the first iteration, naihdr.resume_token will be zero */
		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
		if (rc != 0 && rc != -EBUSY)
			goto out;
		if (rr->response.code != 1) {
			rc = -EIO;
			continue;
		} else
			rc = 0;

		if (cb == NULL)
			continue;

		size = rr->naihdr.naids;
		elems = (rr->response.length -
			 sizeof(struct chsc_header) -
			 sizeof(struct chsc_brinfo_naihdr)) /
			size;

		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
			/* Inform the caller that they need to scrap */
			/* the data that was already reported via cb */
			rc = -EAGAIN;
			break;
		}
		isfirstblock = 0;
		prev_instance = rr->naihdr.instance;
		for (i = 0; i < elems; i++)
			switch (size) {
			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
				(*cb)(priv, l3_ipv6_addr,
				      &rr->entries.l3_ipv6[i]);
				break;
			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
				(*cb)(priv, l3_ipv4_addr,
				      &rr->entries.l3_ipv4[i]);
				break;
			case sizeof(struct qdio_brinfo_entry_l2):
				(*cb)(priv, l2_addr_lnid,
				      &rr->entries.l2[i]);
				break;
			default:
				WARN_ON_ONCE(1);
				rc = -EIO;
				goto out;
			}
	} while (rr->response.code == 0x0107 ||	/* channel busy */
		 (rr->response.code == 1 &&	/* list stored */
		  /* resume token is non-zero => list incomplete */
		  (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
	(*response) = rr->response.code;

out:
	free_page((unsigned long)rr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);