blob: 99823477d57e2d5725b4754840658e6676d6a7c2 [file] [log] [blame]
Jan Glauber779e6e12008-07-17 17:16:48 +02001/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/gfp.h>
Jan Glauber30d77c32011-01-05 12:47:29 +010017#include <linux/kernel_stat.h>
Jan Glauber779e6e12008-07-17 17:16:48 +020018#include <asm/atomic.h>
19#include <asm/debug.h>
20#include <asm/qdio.h>
21
22#include "cio.h"
23#include "css.h"
24#include "device.h"
25#include "qdio.h"
26#include "qdio_debug.h"
Jan Glauber779e6e12008-07-17 17:16:48 +020027
/* module identification, shown by modinfo */
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
32
/**
 * do_siga_sync - issue the SIGA-sync instruction
 * @schid: subchannel id or, for QEBSM, the subchannel token
 * @out_mask: which output queues to sync
 * @in_mask: which input queues to sync
 * @fc: function code, may contain the QEBSM flag
 *
 * Returns the condition code of the instruction (0..3).
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	/* SIGA expects its operands in fixed general registers 0-3 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		/* ipm copies the condition code into %0, srl moves it
		 * down to the low-order bits */
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
51
/**
 * do_siga_input - issue the SIGA-read instruction
 * @schid: subchannel id or, for QEBSM, the subchannel token
 * @mask: which input queues to process
 * @fc: function code, may contain the QEBSM flag
 *
 * Returns the condition code of the instruction (0..3).
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	/* SIGA expects its operands in fixed general registers 0-2 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		/* "memory" clobber: queue memory may change under us */
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
68
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	/* preset cc: if the access exception fixup below triggers, the
	 * ipm/srl never runs and this value is returned unchanged */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		/* on an access exception at 0: continue at 1: */
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* the busy bit is returned by the instruction in bit 0 of reg 0 */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
98
99static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
100{
Jan Glauber779e6e12008-07-17 17:16:48 +0200101 /* all done or next buffer state different */
102 if (ccq == 0 || ccq == 32)
103 return 0;
104 /* not all buffers processed */
105 if (ccq == 96 || ccq == 97)
106 return 1;
107 /* notify devices immediately */
Jan Glauber22f99342008-12-25 13:38:46 +0100108 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
Jan Glauber779e6e12008-07-17 17:16:48 +0200109 return -EIO;
110}
111
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	/* tmp_* are advanced by the instruction on partial completion */
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	/* output queues are numbered after all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		/* rc 1: nothing extracted yet, instruction asks for retry */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		/* unrecoverable: report a check condition to the driver */
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
164
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	/* tmp_* are advanced by the instruction on partial completion */
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	/* output queues are numbered after all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		/* partial completion: retry for the remaining buffers */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		/* unrecoverable: report a check condition to the driver */
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	/* on success the instruction must have consumed all buffers */
	WARN_ON(tmp_count);
	return count - tmp_count;
}
211
212/* returns number of examined buffers and their common state in *state */
213static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
Jan Glauber50f769d2008-12-25 13:38:47 +0100214 unsigned char *state, unsigned int count,
215 int auto_ack)
Jan Glauber779e6e12008-07-17 17:16:48 +0200216{
217 unsigned char __state = 0;
218 int i;
219
220 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
221 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
222
223 if (is_qebsm(q))
Jan Glauber50f769d2008-12-25 13:38:47 +0100224 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
Jan Glauber779e6e12008-07-17 17:16:48 +0200225
226 for (i = 0; i < count; i++) {
227 if (!__state)
228 __state = q->slsb.val[bufnr];
229 else if (q->slsb.val[bufnr] != __state)
230 break;
231 bufnr = next_buf(bufnr);
232 }
233 *state = __state;
234 return i;
235}
236
/* convenience wrapper: examine a single buffer's state */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
242
243/* wrap-around safe setting of slsb states, returns number of changed buffers */
244static inline int set_buf_states(struct qdio_q *q, int bufnr,
245 unsigned char state, int count)
246{
247 int i;
248
249 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
250 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
251
252 if (is_qebsm(q))
253 return qdio_do_sqbs(q, state, bufnr, count);
254
255 for (i = 0; i < count; i++) {
256 xchg(&q->slsb.val[bufnr], state);
257 bufnr = next_buf(bufnr);
258 }
259 return count;
260}
261
/* convenience wrapper: set a single buffer's state */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
267
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* every buffer of every queue starts out owned-but-uninitialized */
	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
281
/*
 * Issue SIGA-sync for the given input/output queue masks, if the
 * device requires it. Returns the SIGA condition code, 0 on success
 * or when no sync is needed.
 */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	/* default addressing: first 32 bits of the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		/* QEBSM addresses the subchannel by token instead */
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
305
Jan Glauber60b5df22009-06-22 12:08:10 +0200306static inline int qdio_siga_sync_q(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200307{
308 if (q->is_input_q)
309 return qdio_siga_sync(q, 0, q->mask);
310 else
311 return qdio_siga_sync(q, q->mask, 0);
312}
313
/* sync all output queues, no input queues */
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	unsigned int all_queues = ~0U;

	return qdio_siga_sync(q, all_queues, 0);
}
318
/* sync every queue in both directions */
static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	unsigned int all_queues = ~0U;

	return qdio_siga_sync(q, all_queues, all_queues);
}
323
/*
 * Issue SIGA-write for the queue's mask, retrying while the adapter
 * reports the busy bit (HiperSockets only). The first busy retry is
 * immediate; afterwards we keep retrying until QDIO_BUSY_BIT_PATIENCE
 * TOD-clock units have elapsed since the first busy response.
 * Returns the SIGA condition code; *busy_bit reflects the last attempt.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	/* default addressing: first 32 bits of the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	/* enhanced SIGA uses function code 3 instead of plain write */
	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		/* QEBSM addresses the subchannel by token instead */
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		/* busy bit is only expected with cc 2 on IQDIO queues */
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
354
/*
 * Issue SIGA-read for the queue's mask. Returns the SIGA condition
 * code, 0 on success.
 */
static inline int qdio_siga_input(struct qdio_q *q)
{
	/* default addressing: first 32 bits of the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		/* QEBSM addresses the subchannel by token instead */
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
374
/* sync queue states after a thin interrupt was received */
static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
	/* without outbound PCI support, sync just this queue */
	if (!pci_out_supported(q)) {
		qdio_siga_sync_q(q);
		return;
	}

	if (need_siga_sync_thinint(q))
		qdio_siga_sync_all(q);
	else if (need_siga_sync_out_thinint(q))
		qdio_siga_sync_out(q);
}
385
/* debug helper: sync the queue, then read one buffer's state */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	qdio_siga_sync_q(q);

	return get_buf_states(q, bufnr, state, 1, 0);
}
392
393static inline void qdio_stop_polling(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200394{
Jan Glauber50f769d2008-12-25 13:38:47 +0100395 if (!q->u.in.polling)
Jan Glauber779e6e12008-07-17 17:16:48 +0200396 return;
Jan Glauber50f769d2008-12-25 13:38:47 +0100397
Jan Glauber779e6e12008-07-17 17:16:48 +0200398 q->u.in.polling = 0;
Jan Glauber6486cda2010-01-04 09:05:42 +0100399 qperf_inc(q, stop_polling);
Jan Glauber779e6e12008-07-17 17:16:48 +0200400
401 /* show the card that we are not polling anymore */
Jan Glauber50f769d2008-12-25 13:38:47 +0100402 if (is_qebsm(q)) {
Jan Glaubere85dea02009-03-26 15:24:29 +0100403 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
Jan Glauber50f769d2008-12-25 13:38:47 +0100404 q->u.in.ack_count);
405 q->u.in.ack_count = 0;
406 } else
Jan Glaubere85dea02009-03-26 15:24:29 +0100407 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
Jan Glauber779e6e12008-07-17 17:16:48 +0200408}
409
Jan Glauberd3072972010-02-26 22:37:36 +0100410static inline void account_sbals(struct qdio_q *q, int count)
411{
412 int pos = 0;
413
414 q->q_stats.nr_sbal_total += count;
415 if (count == QDIO_MAX_BUFFERS_MASK) {
416 q->q_stats.nr_sbals[7]++;
417 return;
418 }
419 while (count >>= 1)
420 pos++;
421 q->q_stats.nr_sbals[pos]++;
422}
423
/*
 * Record an SLSB error state in q->qdio_error and emit debug traces.
 * The "target buffer full" outbound case (SBAL flag 0x10) is only
 * counted, not reported as an error.
 */
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	/* dump subchannel, direction, position and SBAL flag bytes */
	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
Jan Glauber779e6e12008-07-17 17:16:48 +0200444
/*
 * Handle a run of @count PRIMED inbound buffers starting at
 * q->first_to_check: acknowledge them towards the adapter and track
 * the ACK position for later removal by qdio_stop_polling.
 */
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			/* first ACK: just remember where it starts */
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
489
/*
 * Scan the inbound queue from first_to_check for a run of buffers in
 * the same state, advance first_to_check past processed buffers and
 * return its new value.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* adapter delivered data: ACK and consume the buffers */
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new arrived; frontier stays put */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
543
Jan Glauber60b5df22009-06-22 12:08:10 +0200544static int qdio_inbound_q_moved(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200545{
546 int bufnr;
547
548 bufnr = get_inbound_buffer_frontier(q);
549
Jan Glaubere85dea02009-03-26 15:24:29 +0100550 if ((bufnr != q->last_move) || q->qdio_error) {
551 q->last_move = bufnr;
Martin Schwidefsky27d71602010-02-26 22:37:38 +0100552 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
Jan Glauber3a601bf2010-05-17 10:00:17 +0200553 q->u.in.timestamp = get_clock();
Jan Glauber779e6e12008-07-17 17:16:48 +0200554 return 1;
555 } else
556 return 0;
557}
558
/*
 * Decide whether inbound processing is finished (1) or the tasklet
 * should keep polling (0).
 */
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	/* nothing in flight at all */
	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/* sync so the SLSB reflects the adapter's view before peeking */
	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	/* thin interrupts signal new work themselves, no need to poll */
	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		/* poll window expired, give up the initiative */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
591
/*
 * Deliver the buffers between first_to_kick and first_to_check (plus
 * any pending qdio_error) to the upper-layer handler, then reset the
 * kick position and error state.
 */
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	/* no callbacks while the device is not active */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	/* wrap-around safe distance between kick and check position */
	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
619
/*
 * Inbound tasklet core: deliver new buffers to the handler and decide
 * whether to keep polling, reschedule, or stop polling.
 */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
649
/* inbound tasklet entry point; data carries the queue pointer */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
655
/*
 * Scan the outbound queue from first_to_check for a run of buffers in
 * the same state, advance first_to_check past completed buffers and
 * return its new value.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/* some queue formats need a sync before the SLSB is trustworthy */
	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
711
712/* all buffers processed? */
713static inline int qdio_outbound_q_done(struct qdio_q *q)
714{
715 return atomic_read(&q->nr_buf_used) == 0;
716}
717
718static inline int qdio_outbound_q_moved(struct qdio_q *q)
719{
720 int bufnr;
721
722 bufnr = get_outbound_buffer_frontier(q);
723
Jan Glaubere85dea02009-03-26 15:24:29 +0100724 if ((bufnr != q->last_move) || q->qdio_error) {
725 q->last_move = bufnr;
Jan Glauber22f99342008-12-25 13:38:46 +0100726 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
Jan Glauber779e6e12008-07-17 17:16:48 +0200727 return 1;
728 } else
729 return 0;
730}
731
/*
 * Signal the adapter to process the queue's outbound buffers, if the
 * device needs SIGA-write at all. Returns 0 on success, otherwise the
 * SIGA condition code, possibly OR'ed with QDIO_ERROR_SIGA_BUSY on a
 * persistent busy condition.
 */
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			/* busy bit survived the retry loop in qdio_siga_output */
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}
761
/*
 * Outbound tasklet core: deliver completed buffers to the handler and
 * decide, based on queue format and PCI support, whether to reschedule
 * the tasklet or arm the fallback timer.
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	/* zfcp without outbound PCI needs polling until the queue drains */
	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	/* don't reschedule while the device is being shut down */
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
802
/* outbound tasklet entry point: unwrap the queue and process it */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
809
810void qdio_outbound_timer(unsigned long data)
811{
812 struct qdio_q *q = (struct qdio_q *)data;
Jan Glauberc38f9602009-03-26 15:24:26 +0100813
814 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
815 return;
Jan Glauber779e6e12008-07-17 17:16:48 +0200816 tasklet_schedule(&q->tasklet);
817}
818
/*
 * After a thin interrupt, kick every PCI-capable outbound queue of the
 * same irq that still has work pending; the interrupt may have been
 * raised for outbound completion rather than inbound data.
 */
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	/* nothing to do unless outbound PCI interrupts are supported */
	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
831
/*
 * __tiqdio_inbound_processing - inbound tasklet for thin-interrupt queues
 * @q: inbound queue to process
 *
 * Syncs buffer states, hands new buffers to the upper layer and takes
 * care not to lose the initiative: the queue is rechecked after
 * qdio_stop_polling() resets the ACK state.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	/* make the adapter's buffer state changes visible */
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
867
/* thin-interrupt inbound tasklet entry point */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
873
/* set the IRQ state and make it globally visible before continuing */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	/* full barrier: state is read lock-free from interrupt context */
	mb();
}
882
Jan Glauber22f99342008-12-25 13:38:46 +0100883static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
Jan Glauber779e6e12008-07-17 17:16:48 +0200884{
Jan Glauber779e6e12008-07-17 17:16:48 +0200885 if (irb->esw.esw0.erw.cons) {
Jan Glauber22f99342008-12-25 13:38:46 +0100886 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
887 DBF_ERROR_HEX(irb, 64);
888 DBF_ERROR_HEX(irb->ecw, 64);
Jan Glauber779e6e12008-07-17 17:16:48 +0200889 }
890}
891
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	/* the device is going down, do not touch the queues */
	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			/* let the upper layer poll the queue itself */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		/* sync buffer states unless SIGA already does it */
		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
928
/*
 * Handle a check condition raised while the subchannel was active:
 * log it, report QDIO_ERROR_ACTIVATE_CHECK_CONDITION to the upper
 * layer through the first available queue handler and stop the device.
 */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* pick any queue just to reach the registered handler */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		/* no queues at all: nothing to notify */
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
952
Jan Glauber779e6e12008-07-17 17:16:48 +0200953static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
954 int dstat)
955{
956 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +0200957
Jan Glauber22f99342008-12-25 13:38:46 +0100958 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
Jan Glauber4c575422009-06-12 10:26:28 +0200959
960 if (cstat)
961 goto error;
962 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
963 goto error;
964 if (!(dstat & DEV_STAT_DEV_END))
965 goto error;
966 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
967 return;
968
969error:
970 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
971 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
972 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
Jan Glauber779e6e12008-07-17 17:16:48 +0200973}
974
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	/* spurious interrupt or device not set up for qdio */
	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	/* cio passes error conditions as an ERR_PTR-encoded irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* interrupt answers the establish ccw */
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		/* halt/clear during shutdown completed */
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			/* PCI path does not wake up waiters */
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
1031
1032/**
1033 * qdio_get_ssqd_desc - get qdio subchannel description
1034 * @cdev: ccw device to get description for
Jan Glauberbbd50e12008-12-25 13:38:43 +01001035 * @data: where to store the ssqd
Jan Glauber779e6e12008-07-17 17:16:48 +02001036 *
Jan Glauberbbd50e12008-12-25 13:38:43 +01001037 * Returns 0 or an error code. The results of the chsc are stored in the
1038 * specified structure.
Jan Glauber779e6e12008-07-17 17:16:48 +02001039 */
Jan Glauberbbd50e12008-12-25 13:38:43 +01001040int qdio_get_ssqd_desc(struct ccw_device *cdev,
1041 struct qdio_ssqd_desc *data)
Jan Glauber779e6e12008-07-17 17:16:48 +02001042{
Jan Glauber779e6e12008-07-17 17:16:48 +02001043
Jan Glauberbbd50e12008-12-25 13:38:43 +01001044 if (!cdev || !cdev->private)
1045 return -EINVAL;
1046
Jan Glauber22f99342008-12-25 13:38:46 +01001047 DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
Jan Glauberbbd50e12008-12-25 13:38:43 +01001048 return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
Jan Glauber779e6e12008-07-17 17:16:48 +02001049}
1050EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1051
/* stop all queue tasklets and pending flush timers prior to shutdown */
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		/* kill the flush timer before the tasklet it would schedule */
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
1066
1067/**
1068 * qdio_shutdown - shut down a qdio subchannel
1069 * @cdev: associated ccw device
1070 * @how: use halt or clear to shutdown
1071 */
1072int qdio_shutdown(struct ccw_device *cdev, int how)
1073{
Jan Glauber22f99342008-12-25 13:38:46 +01001074 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +02001075 int rc;
1076 unsigned long flags;
Jan Glauber779e6e12008-07-17 17:16:48 +02001077
Jan Glauber779e6e12008-07-17 17:16:48 +02001078 if (!irq_ptr)
1079 return -ENODEV;
1080
Jan Glauberb4547402009-03-26 15:24:24 +01001081 BUG_ON(irqs_disabled());
Jan Glauber22f99342008-12-25 13:38:46 +01001082 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1083
Jan Glauber779e6e12008-07-17 17:16:48 +02001084 mutex_lock(&irq_ptr->setup_mutex);
1085 /*
1086 * Subchannel was already shot down. We cannot prevent being called
1087 * twice since cio may trigger a shutdown asynchronously.
1088 */
1089 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1090 mutex_unlock(&irq_ptr->setup_mutex);
1091 return 0;
1092 }
1093
Jan Glauberc38f9602009-03-26 15:24:26 +01001094 /*
1095 * Indicate that the device is going down. Scheduling the queue
1096 * tasklets is forbidden from here on.
1097 */
1098 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1099
Jan Glauber779e6e12008-07-17 17:16:48 +02001100 tiqdio_remove_input_queues(irq_ptr);
1101 qdio_shutdown_queues(cdev);
1102 qdio_shutdown_debug_entries(irq_ptr, cdev);
1103
1104 /* cleanup subchannel */
1105 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1106
1107 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1108 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1109 else
1110 /* default behaviour is halt */
1111 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1112 if (rc) {
Jan Glauber22f99342008-12-25 13:38:46 +01001113 DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
1114 DBF_ERROR("rc:%4d", rc);
Jan Glauber779e6e12008-07-17 17:16:48 +02001115 goto no_cleanup;
1116 }
1117
1118 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1119 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1120 wait_event_interruptible_timeout(cdev->private->wait_q,
1121 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1122 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1123 10 * HZ);
1124 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1125
1126no_cleanup:
1127 qdio_shutdown_thinint(irq_ptr);
1128
1129 /* restore interrupt handler */
1130 if ((void *)cdev->handler == (void *)qdio_int_handler)
1131 cdev->handler = irq_ptr->orig_handler;
1132 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1133
1134 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1135 mutex_unlock(&irq_ptr->setup_mutex);
Jan Glauber779e6e12008-07-17 17:16:48 +02001136 if (rc)
1137 return rc;
1138 return 0;
1139}
1140EXPORT_SYMBOL_GPL(qdio_shutdown);
1141
1142/**
1143 * qdio_free - free data structures for a qdio subchannel
1144 * @cdev: associated ccw device
1145 */
1146int qdio_free(struct ccw_device *cdev)
1147{
Jan Glauber22f99342008-12-25 13:38:46 +01001148 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +02001149
Jan Glauber779e6e12008-07-17 17:16:48 +02001150 if (!irq_ptr)
1151 return -ENODEV;
1152
Jan Glauber22f99342008-12-25 13:38:46 +01001153 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
Jan Glauber779e6e12008-07-17 17:16:48 +02001154 mutex_lock(&irq_ptr->setup_mutex);
Jan Glauber22f99342008-12-25 13:38:46 +01001155
1156 if (irq_ptr->debug_area != NULL) {
1157 debug_unregister(irq_ptr->debug_area);
1158 irq_ptr->debug_area = NULL;
1159 }
Jan Glauber779e6e12008-07-17 17:16:48 +02001160 cdev->private->qdio_data = NULL;
1161 mutex_unlock(&irq_ptr->setup_mutex);
1162
1163 qdio_release_memory(irq_ptr);
1164 return 0;
1165}
1166EXPORT_SYMBOL_GPL(qdio_free);
1167
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 *
 * Returns 0 on success, -EINVAL on bad parameters, -ENOMEM if any
 * allocation fails. On success the irq data is attached to the cdev.
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	/* each requested queue direction needs a handler */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	/* the qdr must be page aligned for the hardware */
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	/* releases everything allocated so far, including irq_ptr */
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
1227
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 *
 * Returns 0 on success; on any failure the subchannel is shut down
 * again with QDIO_FLAG_CLEANUP_USING_CLEAR and an error is returned.
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* the interrupt handler moves the state to ESTABLISHED or ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1303
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Returns 0 on success, -ENODEV/-EINVAL/-EBUSY on precondition
 * failures, -EIO if activation fails, or a cio error code.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	/* must be established first */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1368
1369static inline int buf_in_between(int bufnr, int start, int count)
1370{
1371 int end = add_buf(start, count);
1372
1373 if (end > start) {
1374 if (bufnr >= start && bufnr < end)
1375 return 1;
1376 else
1377 return 0;
1378 }
1379
1380 /* wrap-around case */
1381 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1382 (bufnr < end))
1383 return 1;
1384 else
1385 return 0;
1386}
1387
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0 or the result of the SIGA input instruction. Adjusts the
 * polling ACK state if the emptied range overlaps the acknowledged
 * buffers.
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	/* used is the value before the buffers were handed back */
	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
1443
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 *
 * Marks the buffers PRIMED and signals the adapter with the SIGA
 * variant appropriate for the queue type. Returns 0 or a SIGA error.
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	}
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else
		qperf_inc(q, fast_requeue);

out:
	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
1520
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 *
 * Returns 0 on success, -EINVAL on bad parameters or direction flag,
 * -ENODEV if the device is not set up for qdio, -EBUSY if it is not
 * active, or a SIGA error from the inbound/outbound handler.
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	/* dispatch on the direction flag set by the upper layer */
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
1556
Jan Glauberd36deae2010-09-07 21:14:39 +00001557/**
1558 * qdio_start_irq - process input buffers
1559 * @cdev: associated ccw_device for the qdio subchannel
1560 * @nr: input queue number
1561 *
1562 * Return codes
1563 * 0 - success
1564 * 1 - irqs not started since new data is available
1565 */
1566int qdio_start_irq(struct ccw_device *cdev, int nr)
1567{
1568 struct qdio_q *q;
1569 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1570
1571 if (!irq_ptr)
1572 return -ENODEV;
1573 q = irq_ptr->input_qs[nr];
1574
1575 WARN_ON(queue_irqs_enabled(q));
1576
Jan Glauber4f325182011-01-05 12:47:49 +01001577 if (!shared_ind(q->irq_ptr->dsci))
Jan Glauberd36deae2010-09-07 21:14:39 +00001578 xchg(q->irq_ptr->dsci, 0);
1579
1580 qdio_stop_polling(q);
1581 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1582
1583 /*
1584 * We need to check again to not lose initiative after
1585 * resetting the ACK state.
1586 */
Jan Glauber4f325182011-01-05 12:47:49 +01001587 if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
Jan Glauberd36deae2010-09-07 21:14:39 +00001588 goto rescan;
1589 if (!qdio_inbound_q_done(q))
1590 goto rescan;
1591 return 0;
1592
1593rescan:
1594 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1595 &q->u.in.queue_irq_state))
1596 return 0;
1597 else
1598 return 1;
1599
1600}
1601EXPORT_SYMBOL(qdio_start_irq);
1602
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/* make the adapter's buffer state changes visible */
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
1653
1654/**
1655 * qdio_stop_irq - disable interrupt processing for the device
1656 * @cdev: associated ccw_device for the qdio subchannel
1657 * @nr: input queue number
1658 *
1659 * Return codes
1660 * 0 - interrupts were already disabled
1661 * 1 - interrupts successfully disabled
1662 */
1663int qdio_stop_irq(struct ccw_device *cdev, int nr)
1664{
1665 struct qdio_q *q;
1666 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1667
1668 if (!irq_ptr)
1669 return -ENODEV;
1670 q = irq_ptr->input_qs[nr];
1671
1672 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1673 &q->u.in.queue_irq_state))
1674 return 0;
1675 else
1676 return 1;
1677}
1678EXPORT_SYMBOL(qdio_stop_irq);
1679
Jan Glauber779e6e12008-07-17 17:16:48 +02001680static int __init init_QDIO(void)
1681{
1682 int rc;
1683
1684 rc = qdio_setup_init();
1685 if (rc)
1686 return rc;
1687 rc = tiqdio_allocate_memory();
1688 if (rc)
1689 goto out_cache;
1690 rc = qdio_debug_init();
1691 if (rc)
1692 goto out_ti;
Jan Glauber779e6e12008-07-17 17:16:48 +02001693 rc = tiqdio_register_thinints();
1694 if (rc)
Jan Glauber6486cda2010-01-04 09:05:42 +01001695 goto out_debug;
Jan Glauber779e6e12008-07-17 17:16:48 +02001696 return 0;
1697
Jan Glauber779e6e12008-07-17 17:16:48 +02001698out_debug:
1699 qdio_debug_exit();
1700out_ti:
1701 tiqdio_free_memory();
1702out_cache:
1703 qdio_setup_exit();
1704 return rc;
1705}
1706
/* Module teardown: mirrors init_QDIO() in reverse order. */
static void __exit exit_QDIO(void)
{
	/* Unregister thin interrupts before freeing their memory. */
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}
1714
/* Register module entry/exit points. */
module_init(init_QDIO);
module_exit(exit_QDIO);