/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting. We
 *	will take a spinlock out on the queue before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
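	/*
	 * Each response queue entry encodes a handle: bit 0 is the "fast"
	 * flag (the adapter sent no status back) and the remaining bits,
	 * shifted down by two, index the fib in dev->fibs.
	 */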
	while (aac_consumer_get(dev, q, &entry)) {
		int fast;
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
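		/* The matching numpending++ was done when this fib was
		 * queued to the adapter in the submission path. */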
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib: a fast response carries no
			 *	status, so fake ST_OK and mark the fib as
			 *	processed by the adapter.
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

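		/*
		 * NuFileSystem replies keep their status in the low 16 bits;
		 * if anything is set in the upper half, force the status to
		 * ST_OK (an interpretation of the mask below).
		 */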
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) {
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) {
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done)
				fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
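			/*
			 * done == 2 appears to mean the waiter has already
			 * abandoned this fib, so nothing else will release
			 * it; complete and free it here.
			 */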
			if (fib->done == 2) {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

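	/*
	 * Book-keeping: remember the deepest single drain of the queue and
	 * count interrupts that found nothing to do.
	 */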
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}


/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. We will take a spinlock out on the queue before operating
 *	on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling command QEs off the command queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	to the system.
	 */
	while (aac_consumer_get(dev, q, &entry)) {
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

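		/*
		 * entry->addr is a byte offset into the adapter-initiated
		 * fib area; dividing by the fib size gives an index into
		 * the mapped aif_base_va array.
		 */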
		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. In the non-queued case
		 *	we can just use the one on the stack; we only need a
		 *	fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread) {
			fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
			if (!fib)
				fib = &fibctx;
		}

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

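		/*
		 * With an AIF thread and a heap-allocated fib, queue the
		 * command for the thread; otherwise acknowledge it inline
		 * with ST_OK.
		 */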
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}


/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@Index: completion reference
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting.
 */

unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
{
	u32 index = le32_to_cpu(Index);

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
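	/*
	 * Handle layout, as read from the decoding below (not from
	 * firmware documentation):
	 *
	 *   bit 0      - fast response; the fib carries no status, so
	 *                assume ST_OK
	 *   bit 1      - adapter-initiated fib (AIF); the remaining bits
	 *                are a byte offset into the shared comm area
	 *   bits 2..31 - when bit 1 is clear: index into dev->fibs
	 */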
	if ((index & 0x00000002L)) {
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		if (index == 0xFFFFFFFEL)	/* Special Case */
			return 0;		/* Do nothing */
		/*
		 *	Allocate a FIB. Everything on this path is handed to
		 *	the AIF thread, so the fib and its hw_fib must both
		 *	come off the heap; bail out if either allocation fails.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kmalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
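		/*
		 * The adapter-built fib lives in the shared comm area at
		 * dev->regs.sa plus the byte offset encoded in the handle;
		 * snapshot it into our own hw_fib before queueing.
		 */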
		memset(hw_fib, 0, sizeof(struct hw_fib));
		memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) +
			(index & ~0x00000002L)), sizeof(struct hw_fib));
		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else {
		int fast = index & 0x01;
		struct fib * fib = &dev->fibs[index >> 2];
		struct hw_fib * hwfib = fib->hw_fib_va;

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		if (fast) {
			/*
			 *	Doctor the fib: a fast response carries no
			 *	status, so fake ST_OK and mark the fib as
			 *	processed by the adapter.
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) {
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) {
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			dprintk((KERN_INFO "event_wait up\n"));
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done)
				fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
		}
		return 0;
	}
}