blob: bbe346bd3cb8f476e8f012abd7e5e6f7a3e2f41f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/module.h>
38
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42////////////////////////////////////////////////////////////////
43
44#include <linux/ioctl.h> /* For SCSI-Passthrough */
45#include <asm/uaccess.h>
46
47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */
49#include <linux/config.h> /* for CONFIG_PCI */
50#include <linux/pci.h> /* for PCI support */
51#include <linux/proc_fs.h>
52#include <linux/blkdev.h>
53#include <linux/delay.h> /* for udelay */
54#include <linux/interrupt.h>
55#include <linux/kernel.h> /* for printk */
56#include <linux/sched.h>
57#include <linux/reboot.h>
58#include <linux/spinlock.h>
59#include <linux/smp_lock.h>
60
61#include <linux/timer.h>
62#include <linux/string.h>
63#include <linux/ioport.h>
64
65#include <asm/processor.h> /* for boot_cpu_data */
66#include <asm/pgtable.h>
67#include <asm/io.h> /* for virt_to_bus, etc. */
68
69#include <scsi/scsi.h>
70#include <scsi/scsi_cmnd.h>
71#include <scsi/scsi_device.h>
72#include <scsi/scsi_host.h>
73#include <scsi/scsi_tcq.h>
74
75#include "dpt/dptsig.h"
76#include "dpti.h"
77
78/*============================================================================
79 * Create a binary signature - this is read by dptsig
80 * Needed for our management apps
81 *============================================================================
82 */
/*
 * Binary driver signature embedded in the module image.  Adaptec's
 * management tools scan for the "dPtSiG" marker (see dpt/dptsig.h) to
 * discover the driver's version, target processor family, and
 * capability flags.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown architecture */
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
100
101
102
103
104/*============================================================================
105 * Globals
106 *============================================================================
107 */
108
/* Serializes changes to hba_chain / hba_count (and lookups over them). */
static DECLARE_MUTEX(adpt_configuration_lock);

/* I2O system table shared with the IOPs; built by adpt_i2o_build_sys_table(). */
static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;

/* Singly linked list of all controllers found, plus its length. */
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

/* Entry points for the management character device (registered in
 * adpt_detect under DPTI_I2O_MAJOR). */
static struct file_operations adpt_fops = {
	.ioctl = adpt_ioctl,
	.open = adpt_open,
	.release = adpt_close
};

#ifdef REBOOT_NOTIFIER
/* Shuts the IOPs down on restart/halt/power-off; see adpt_reboot_event(). */
static struct notifier_block adpt_reboot_notifier =
{
	 adpt_reboot_event,
	 NULL,
	 0
};
#endif

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;	/* result for the waiter (see adpt_i2o_post_wait) */
	u32 id;		/* matches adpt_post_wait_id stamped into the message */
	adpt_wait_queue_head_t *wq;	/* wait queue of the posting thread */
	struct adpt_i2o_post_wait_data *next;	/* link in adpt_post_wait_queue */
};

/* Pending synchronous requests, the id generator for them, and the
 * spinlock guarding both. */
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
147
148
149/*============================================================================
150 * Functions
151 *============================================================================
152 */
153
154static u8 adpt_read_blink_led(adpt_hba* host)
155{
156 if(host->FwDebugBLEDflag_P != 0) {
157 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158 return readb(host->FwDebugBLEDvalue_P);
159 }
160 }
161 return 0;
162}
163
164/*============================================================================
165 * Scsi host template interface functions
166 *============================================================================
167 */
168
/* PCI IDs this driver binds to: the standard DPT I2O card and the
 * split-BAR "Raptor" variant.  Zeroed sentinel terminates the table. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
175
176static int adpt_detect(struct scsi_host_template* sht)
177{
178 struct pci_dev *pDev = NULL;
179 adpt_hba* pHba;
180
181 adpt_init();
182
183 PINFO("Detecting Adaptec I2O RAID controllers...\n");
184
185 /* search for all Adatpec I2O RAID cards */
186 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
187 if(pDev->device == PCI_DPT_DEVICE_ID ||
188 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
189 if(adpt_install_hba(sht, pDev) ){
190 PERROR("Could not Init an I2O RAID device\n");
191 PERROR("Will not try to detect others.\n");
192 return hba_count-1;
193 }
194 }
195 }
196
197 /* In INIT state, Activate IOPs */
198 for (pHba = hba_chain; pHba; pHba = pHba->next) {
199 // Activate does get status , init outbound, and get hrt
200 if (adpt_i2o_activate_hba(pHba) < 0) {
201 adpt_i2o_delete_hba(pHba);
202 }
203 }
204
205
206 /* Active IOPs in HOLD state */
207
208rebuild_sys_tab:
209 if (hba_chain == NULL)
210 return 0;
211
212 /*
213 * If build_sys_table fails, we kill everything and bail
214 * as we can't init the IOPs w/o a system table
215 */
216 if (adpt_i2o_build_sys_table() < 0) {
217 adpt_i2o_sys_shutdown();
218 return 0;
219 }
220
221 PDEBUG("HBA's in HOLD state\n");
222
223 /* If IOP don't get online, we need to rebuild the System table */
224 for (pHba = hba_chain; pHba; pHba = pHba->next) {
225 if (adpt_i2o_online_hba(pHba) < 0) {
226 adpt_i2o_delete_hba(pHba);
227 goto rebuild_sys_tab;
228 }
229 }
230
231 /* Active IOPs now in OPERATIONAL state */
232 PDEBUG("HBA's in OPERATIONAL state\n");
233
234 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
235 for (pHba = hba_chain; pHba; pHba = pHba->next) {
236 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
237 if (adpt_i2o_lct_get(pHba) < 0){
238 adpt_i2o_delete_hba(pHba);
239 continue;
240 }
241
242 if (adpt_i2o_parse_lct(pHba) < 0){
243 adpt_i2o_delete_hba(pHba);
244 continue;
245 }
246 adpt_inquiry(pHba);
247 }
248
249 for (pHba = hba_chain; pHba; pHba = pHba->next) {
250 if( adpt_scsi_register(pHba,sht) < 0){
251 adpt_i2o_delete_hba(pHba);
252 continue;
253 }
254 pHba->initialized = TRUE;
255 pHba->state &= ~DPTI_STATE_RESET;
256 }
257
258 // Register our control device node
259 // nodes will need to be created in /dev to access this
260 // the nodes can not be created from within the driver
261 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
262 adpt_i2o_sys_shutdown();
263 return 0;
264 }
265 return hba_count;
266}
267
268
/*
 * scsi_unregister will be called AFTER we return.
 */
/* Release handler: tear down the controller bound to `host', then
 * unregister the Scsi_Host itself.  Always returns 0. */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
280
281
/*
 * Send a standard 36-byte INQUIRY to the adapter itself via a
 * DPT-private I2O SCSI_EXEC message and build the human-readable
 * pHba->detail string ("Vendor: ... Model: ... FW: ...") from the
 * reply.  On failure pHba->detail falls back to a generic label.
 * Finishes by refreshing the IOP status block.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[14];	/* request frame: 12 header words + one 2-word SGE */
	u32 *mptr;	/* write cursor into msg[] */
	u32 *lenptr;	/* remembers where the transfer length word lives */
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;	/* bounce buffer the IOP DMAs the INQUIRY data into */
	u8 scb[16];	/* SCSI CDB, always copied as a 16-byte block */
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;	/* standard INQUIRY response length */
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN (iop<--dev)

	reqlen = 14;	// SINGLE SGE
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]= 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	*mptr++ = 0xD0000000|direction|len;	/* last-element SGE flags | byte count */
	*mptr++ = virt_to_bus(buf);

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* NOTE(review): on timeout/interrupt buf is deliberately NOT
		 * freed - presumably because the IOP may still DMA into it
		 * later; confirm before "fixing" this apparent leak. */
		if (rcode != -ETIME && rcode != -EINTR)
			kfree(buf);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		kfree(buf);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
363
364
365static int adpt_slave_configure(struct scsi_device * device)
366{
367 struct Scsi_Host *host = device->host;
368 adpt_hba* pHba;
369
370 pHba = (adpt_hba *) host->hostdata[0];
371
372 if (host->can_queue && device->tagged_supported) {
373 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
374 host->can_queue - 1);
375 } else {
376 scsi_adjust_queue_depth(device, 0, 1);
377 }
378 return 0;
379}
380
/*
 * scsi_host_template.queuecommand() entry point: screen the command,
 * locate (or bind) the per-device driver state, and hand the command
 * to adpt_scsi_to_i2o() for translation into an I2O message.
 */
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */
	ulong timeout = jiffies + (TMOUT_SCSI*HZ);

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need to a way to restart the scsi-cores queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		/* controller busy resetting - return non-zero so the
		 * midlayer retries the command later */
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	if(cmd->eh_state != SCSI_STATE_QUEUED){
		// If we are not doing error recovery
		mod_timer(&cmd->eh_timeout, timeout);
	}

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
454
455static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
456 sector_t capacity, int geom[])
457{
458 int heads=-1;
459 int sectors=-1;
460 int cylinders=-1;
461
462 // *** First lets set the default geometry ****
463
464 // If the capacity is less than ox2000
465 if (capacity < 0x2000 ) { // floppy
466 heads = 18;
467 sectors = 2;
468 }
469 // else if between 0x2000 and 0x20000
470 else if (capacity < 0x20000) {
471 heads = 64;
472 sectors = 32;
473 }
474 // else if between 0x20000 and 0x40000
475 else if (capacity < 0x40000) {
476 heads = 65;
477 sectors = 63;
478 }
479 // else if between 0x4000 and 0x80000
480 else if (capacity < 0x80000) {
481 heads = 128;
482 sectors = 63;
483 }
484 // else if greater than 0x80000
485 else {
486 heads = 255;
487 sectors = 63;
488 }
489 cylinders = sector_div(capacity, heads * sectors);
490
491 // Special case if CDROM
492 if(sdev->type == 5) { // CDROM
493 heads = 252;
494 sectors = 63;
495 cylinders = 1111;
496 }
497
498 geom[0] = heads;
499 geom[1] = sectors;
500 geom[2] = cylinders;
501
502 PDEBUG("adpt_bios_param: exit\n");
503 return 0;
504}
505
506
/*
 * scsi_host_template.info() entry point: return the adapter description
 * string built by adpt_inquiry() ("Vendor: ... Model: ... FW: ...").
 */
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}
514
/*
 * /proc/scsi read handler.  Emits a description of the adapter and all
 * of its devices into `buffer', honouring the midlayer's offset/length
 * windowing protocol: `begin' is the file offset of buffer[0], `len'
 * counts bytes written past `begin', and `pos' tracks the absolute
 * offset produced so far.  Writes (inout != 0) are not supported.
 */
static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;	/* bytes generated beyond `begin' */
	int begin = 0;	/* file offset of buffer[0] */
	int pos = 0;	/* absolute file offset generated so far */
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	down(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	up(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;	/* window filled - done */
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){	/* walk the lun chain at this (chan,id) */
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
639
640
641/*===========================================================================
642 * Error Handling routines
643 *===========================================================================
644 */
645
/*
 * eh_abort_handler: ask the IOP to abort one outstanding command.
 * Returns SUCCESS when the firmware acknowledges the abort, FAILED
 * otherwise (including firmware that does not implement SCSI_ABORT).
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;	/* never issued / already completed */
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* NOTE(review): casting a kernel pointer to u32 truncates on 64-bit;
	 * this context word presumably must match the one stamped on the
	 * original request - verify against adpt_scsi_to_i2o(). */
	msg[4] = (u32)cmd;
	if( (rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER)) != 0){
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}
680
681
682#define I2O_DEVICE_RESET 0x27
683// This is the same for BLK and SCSI devices
684// NOTE this is wrong in the i2o.h definitions
685// This is not currently supported by our adapter but we issue it anyway
686static int adpt_device_reset(struct scsi_cmnd* cmd)
687{
688 adpt_hba* pHba;
689 u32 msg[4];
690 u32 rcode;
691 int old_state;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -0700692 struct adpt_device* d = cmd->device->hostdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 pHba = (void*) cmd->device->host->hostdata[0];
695 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
696 if (!d) {
697 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
698 return FAILED;
699 }
700 memset(msg, 0, sizeof(msg));
701 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
702 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
703 msg[2] = 0;
704 msg[3] = 0;
705
706 old_state = d->state;
707 d->state |= DPTI_DEV_RESET;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -0700708 if( (rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER)) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 d->state = old_state;
710 if(rcode == -EOPNOTSUPP ){
711 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
712 return FAILED;
713 }
714 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
715 return FAILED;
716 } else {
717 d->state = old_state;
718 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
719 return SUCCESS;
720 }
721}
722
723
724#define I2O_HBA_BUS_RESET 0x87
725// This version of bus reset is called by the eh_error handler
726static int adpt_bus_reset(struct scsi_cmnd* cmd)
727{
728 adpt_hba* pHba;
729 u32 msg[4];
730
731 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
732 memset(msg, 0, sizeof(msg));
733 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
734 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
735 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
736 msg[2] = 0;
737 msg[3] = 0;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -0700738 if(adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
740 return FAILED;
741 } else {
742 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
743 return SUCCESS;
744 }
745}
746
747// This version of reset is called by the eh_error_handler
Jeff Garzik df0ae242005-05-28 07:57:14 -0400748static int __adpt_reset(struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749{
750 adpt_hba* pHba;
751 int rcode;
752 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
753 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
754 rcode = adpt_hba_reset(pHba);
755 if(rcode == 0){
756 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
757 return SUCCESS;
758 } else {
759 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
760 return FAILED;
761 }
762}
763
Jeff Garzik df0ae242005-05-28 07:57:14 -0400764static int adpt_reset(struct scsi_cmnd* cmd)
765{
766 int rc;
767
768 spin_lock_irq(cmd->device->host->host_lock);
769 rc = __adpt_reset(cmd);
770 spin_unlock_irq(cmd->device->host->host_lock);
771
772 return rc;
773}
774
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/*
 * Re-initialize the controller from scratch: activate the IOP, rebuild
 * the I2O system table, bring the IOP online, re-read and re-parse the
 * LCT, then fail back any SCBs posted before the reset.  If any step
 * fails the HBA is deleted and that step's error code is returned;
 * returns 0 on success.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* Block new commands while the reset is in progress (adpt_queue
	 * checks DPTI_STATE_RESET). */
	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
815
816/*===========================================================================
817 *
818 *===========================================================================
819 */
820
821
822static void adpt_i2o_sys_shutdown(void)
823{
824 adpt_hba *pHba, *pNext;
825 struct adpt_i2o_post_wait_data *p1, *p2;
826
827 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
828 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
829 /* Delete all IOPs from the controller chain */
830 /* They should have already been released by the
831 * scsi-core
832 */
833 for (pHba = hba_chain; pHba; pHba = pNext) {
834 pNext = pHba->next;
835 adpt_i2o_delete_hba(pHba);
836 }
837
838 /* Remove any timedout entries from the wait queue. */
839 p2 = NULL;
840// spin_lock_irqsave(&adpt_post_wait_lock, flags);
841 /* Nothing should be outstanding at this point so just
842 * free them
843 */
844 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p2->next) {
845 kfree(p1);
846 }
847// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
848 adpt_post_wait_queue = NULL;
849
850 printk(KERN_INFO "Adaptec I2O controllers down.\n");
851}
852
853/*
854 * reboot/shutdown notification.
855 *
856 * - Quiesce each IOP in the system
857 *
858 */
859
#ifdef REBOOT_NOTIFIER
/*
 * Reboot-notifier callback: quiesce all IOPs on restart, halt, or
 * power-off; every other event is ignored.  Always NOTIFY_DONE.
 */
static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
{
	switch (code) {
	case SYS_RESTART:
	case SYS_HALT:
	case SYS_POWER_OFF:
		adpt_i2o_sys_shutdown();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
#endif
872
873
/*
 * Probe one PCI function: enable the device, set up DMA masking, map
 * its register BAR(s) (standard cards use BAR0 only; split-BAR
 * "Raptor" cards keep the message unit behind BAR1), allocate and
 * link a zeroed adpt_hba, and hook the shared interrupt.
 * Returns 0 on success or a negative errno.
 */
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}
	pci_set_master(pDev);
	/* Prefer 64-bit DMA, fall back to 32-bit; bail if neither works. */
	if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
	    pci_set_dma_mask(pDev, 0xffffffffULL))
		return -EINVAL;

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}


	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
	if( pHba == NULL) {
		if(msg_addr_virt != base_addr_virt){
			iounmap(msg_addr_virt);
		}
		iounmap(base_addr_virt);
		return -ENOMEM;
	}
	memset(pHba, 0, sizeof(adpt_hba));

	down(&adpt_configuration_lock);

	/* Append to the tail of the global controller chain. */
	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	up(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	/* Fixed register offsets within the mapped BAR0 window. */
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;	/* cleared once detect finishes */
	pHba->pDev = pDev;
	pHba->devices = NULL;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
1000
1001
/*
 * Detach and destroy one controller: release its IRQ, unlink it from
 * the global chain, unmap its BAR(s), free the firmware tables and all
 * per-device structures, then free the adpt_hba itself.  When the last
 * controller goes away the management chrdev is unregistered too.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	down(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiese
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink pHba from the singly linked hba_chain. */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	up(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	/* Split-BAR (Raptor) cards have a separate message-unit mapping. */
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->hrt) {
		kfree(pHba->hrt);
	}
	if(pHba->lct){
		kfree(pHba->lct);
	}
	if(pHba->status_block) {
		kfree(pHba->status_block);
	}
	if(pHba->reply_pool){
		kfree(pHba->reply_pool);
	}

	/* Free the i2o_device list, then every lun chain on every channel. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	kfree(pHba);

	/* Last controller gone - drop the management device node. */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
	}
}
1072
1073
/*
 * One-time driver initialization: print the version banner and, when
 * built with REBOOT_NOTIFIER, hook the reboot notifier chain.
 * Always returns 0.
 */
static int adpt_init(void)
{
	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
#ifdef REBOOT_NOTIFIER
	register_reboot_notifier(&adpt_reboot_notifier);
#endif

	return 0;
}
1083
1084
1085static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1086{
1087 struct adpt_device* d;
1088
1089 if(chan < 0 || chan >= MAX_CHANNEL)
1090 return NULL;
1091
1092 if( pHba->channel[chan].device == NULL){
1093 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1094 return NULL;
1095 }
1096
1097 d = pHba->channel[chan].device[id];
1098 if(!d || d->tid == 0) {
1099 return NULL;
1100 }
1101
1102 /* If it is the only lun at that address then this should match*/
1103 if(d->scsi_lun == lun){
1104 return d;
1105 }
1106
1107 /* else we need to look through all the luns */
1108 for(d=d->next_lun ; d ; d = d->next_lun){
1109 if(d->scsi_lun == lun){
1110 return d;
1111 }
1112 }
1113 return NULL;
1114}
1115
1116
/*
 * Post an I2O message and sleep until the IOP's reply wakes us.
 *
 * A wait_data node is pushed onto adpt_post_wait_queue and its 15-bit
 * id is encoded into msg[2] with bit 31 set (post-wait context);
 * adpt_i2o_post_wait_complete() matches the reply back to the node
 * and wakes this thread.  timeout is in seconds; 0 means wait forever.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT if
 * no message frame was available (the queued node is deliberately
 * leaked - see comment below), -ETIME if the IOP never replied in
 * time, or -EOPNOTSUPP if the IOP reported the function unsupported.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	/* ids wrap at 15 bits; bit 31 marks post-wait */
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	/* Register on the wait queue BEFORE posting so a fast reply
	 * cannot be missed. */
	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock (if held by our caller) while we
		 * sleep so completions can be processed. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resorces is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1202
1203
/*
 * Copy a prebuilt I2O message (len bytes starting at data) into a
 * free inbound message frame and post it to the IOP.
 *
 * Returns 0 on success or -ETIMEDOUT if the adapter supplies no free
 * frame within 30 seconds.  The rmb()/wmb() pairs order the MMIO
 * reads/writes around the frame copy and must not be moved.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	/* Poll the post port for a free message-frame offset, yielding
	 * the CPU for a jiffy between attempts. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	} while(m == EMPTY_QUEUE);

	/* m is the frame's byte offset from the message window base. */
	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1234
1235
1236static void adpt_i2o_post_wait_complete(u32 context, int status)
1237{
1238 struct adpt_i2o_post_wait_data *p1 = NULL;
1239 /*
1240 * We need to search through the adpt_post_wait
1241 * queue to see if the given message is still
1242 * outstanding. If not, it means that the IOP
1243 * took longer to respond to the message than we
1244 * had allowed and timer has already expired.
1245 * Not much we can do about that except log
1246 * it for debug purposes, increase timeout, and recompile
1247 *
1248 * Lock needed to keep anyone from moving queue pointers
1249 * around while we're looking through them.
1250 */
1251
1252 context &= 0x7fff;
1253
1254 spin_lock(&adpt_post_wait_lock);
1255 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1256 if(p1->id == context) {
1257 p1->status = status;
1258 spin_unlock(&adpt_post_wait_lock);
1259 wake_up_interruptible(p1->wq);
1260 return;
1261 }
1262 }
1263 spin_unlock(&adpt_post_wait_lock);
1264 // If this happens we lose commands that probably really completed
1265 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1266 printk(KERN_DEBUG" Tasks in wait queue:\n");
1267 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1268 printk(KERN_DEBUG" %d\n",p1->id);
1269 }
1270 return;
1271}
1272
1273static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1274{
1275 u32 msg[8];
1276 u8* status;
1277 u32 m = EMPTY_QUEUE ;
1278 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1279
1280 if(pHba->initialized == FALSE) { // First time reset should be quick
1281 timeout = jiffies + (25*HZ);
1282 } else {
1283 adpt_i2o_quiesce_hba(pHba);
1284 }
1285
1286 do {
1287 rmb();
1288 m = readl(pHba->post_port);
1289 if (m != EMPTY_QUEUE) {
1290 break;
1291 }
1292 if(time_after(jiffies,timeout)){
1293 printk(KERN_WARNING"Timeout waiting for message!\n");
1294 return -ETIMEDOUT;
1295 }
1296 set_current_state(TASK_UNINTERRUPTIBLE);
1297 schedule_timeout(1);
1298 } while (m == EMPTY_QUEUE);
1299
1300 status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
1301 if(status == NULL) {
1302 adpt_send_nop(pHba, m);
1303 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1304 return -ENOMEM;
1305 }
1306 memset(status,0,4);
1307
1308 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1309 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1310 msg[2]=0;
1311 msg[3]=0;
1312 msg[4]=0;
1313 msg[5]=0;
1314 msg[6]=virt_to_bus(status);
1315 msg[7]=0;
1316
1317 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1318 wmb();
1319 writel(m, pHba->post_port);
1320 wmb();
1321
1322 while(*status == 0){
1323 if(time_after(jiffies,timeout)){
1324 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1325 kfree(status);
1326 return -ETIMEDOUT;
1327 }
1328 rmb();
1329 set_current_state(TASK_UNINTERRUPTIBLE);
1330 schedule_timeout(1);
1331 }
1332
1333 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1334 PDEBUG("%s: Reset in progress...\n", pHba->name);
1335 // Here we wait for message frame to become available
1336 // indicated that reset has finished
1337 do {
1338 rmb();
1339 m = readl(pHba->post_port);
1340 if (m != EMPTY_QUEUE) {
1341 break;
1342 }
1343 if(time_after(jiffies,timeout)){
1344 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1345 return -ETIMEDOUT;
1346 }
1347 set_current_state(TASK_UNINTERRUPTIBLE);
1348 schedule_timeout(1);
1349 } while (m == EMPTY_QUEUE);
1350 // Flush the offset
1351 adpt_send_nop(pHba, m);
1352 }
1353 adpt_i2o_status_get(pHba);
1354 if(*status == 0x02 ||
1355 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1356 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1357 pHba->name);
1358 } else {
1359 PDEBUG("%s: Reset completed.\n", pHba->name);
1360 }
1361
1362 kfree(status);
1363#ifdef UARTDELAY
1364 // This delay is to allow someone attached to the card through the debug UART to
1365 // set up the dump levels that they want before the rest of the initialization sequence
1366 adpt_delay(20000);
1367#endif
1368 return 0;
1369}
1370
1371
1372static int adpt_i2o_parse_lct(adpt_hba* pHba)
1373{
1374 int i;
1375 int max;
1376 int tid;
1377 struct i2o_device *d;
1378 i2o_lct *lct = pHba->lct;
1379 u8 bus_no = 0;
1380 s16 scsi_id;
1381 s16 scsi_lun;
1382 u32 buf[10]; // larger than 7, or 8 ...
1383 struct adpt_device* pDev;
1384
1385 if (lct == NULL) {
1386 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1387 return -1;
1388 }
1389
1390 max = lct->table_size;
1391 max -= 3;
1392 max /= 9;
1393
1394 for(i=0;i<max;i++) {
1395 if( lct->lct_entry[i].user_tid != 0xfff){
1396 /*
1397 * If we have hidden devices, we need to inform the upper layers about
1398 * the possible maximum id reference to handle device access when
1399 * an array is disassembled. This code has no other purpose but to
1400 * allow us future access to devices that are currently hidden
1401 * behind arrays, hotspares or have not been configured (JBOD mode).
1402 */
1403 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1404 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1405 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1406 continue;
1407 }
1408 tid = lct->lct_entry[i].tid;
1409 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1410 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1411 continue;
1412 }
1413 bus_no = buf[0]>>16;
1414 scsi_id = buf[1];
1415 scsi_lun = (buf[2]>>8 )&0xff;
1416 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1417 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1418 continue;
1419 }
1420 if (scsi_id >= MAX_ID){
1421 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1422 continue;
1423 }
1424 if(bus_no > pHba->top_scsi_channel){
1425 pHba->top_scsi_channel = bus_no;
1426 }
1427 if(scsi_id > pHba->top_scsi_id){
1428 pHba->top_scsi_id = scsi_id;
1429 }
1430 if(scsi_lun > pHba->top_scsi_lun){
1431 pHba->top_scsi_lun = scsi_lun;
1432 }
1433 continue;
1434 }
1435 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1436 if(d==NULL)
1437 {
1438 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1439 return -ENOMEM;
1440 }
1441
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001442 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 d->next = NULL;
1444
1445 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1446
1447 d->flags = 0;
1448 tid = d->lct_data.tid;
1449 adpt_i2o_report_hba_unit(pHba, d);
1450 adpt_i2o_install_device(pHba, d);
1451 }
1452 bus_no = 0;
1453 for(d = pHba->devices; d ; d = d->next) {
1454 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1455 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1456 tid = d->lct_data.tid;
1457 // TODO get the bus_no from hrt-but for now they are in order
1458 //bus_no =
1459 if(bus_no > pHba->top_scsi_channel){
1460 pHba->top_scsi_channel = bus_no;
1461 }
1462 pHba->channel[bus_no].type = d->lct_data.class_id;
1463 pHba->channel[bus_no].tid = tid;
1464 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1465 {
1466 pHba->channel[bus_no].scsi_id = buf[1];
1467 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1468 }
1469 // TODO remove - this is just until we get from hrt
1470 bus_no++;
1471 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1472 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1473 break;
1474 }
1475 }
1476 }
1477
1478 // Setup adpt_device table
1479 for(d = pHba->devices; d ; d = d->next) {
1480 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1481 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1482 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1483
1484 tid = d->lct_data.tid;
1485 scsi_id = -1;
1486 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1487 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1488 bus_no = buf[0]>>16;
1489 scsi_id = buf[1];
1490 scsi_lun = (buf[2]>>8 )&0xff;
1491 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1492 continue;
1493 }
1494 if (scsi_id >= MAX_ID) {
1495 continue;
1496 }
1497 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1498 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1499 if(pDev == NULL) {
1500 return -ENOMEM;
1501 }
1502 pHba->channel[bus_no].device[scsi_id] = pDev;
1503 memset(pDev,0,sizeof(struct adpt_device));
1504 } else {
1505 for( pDev = pHba->channel[bus_no].device[scsi_id];
1506 pDev->next_lun; pDev = pDev->next_lun){
1507 }
1508 pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1509 if(pDev->next_lun == NULL) {
1510 return -ENOMEM;
1511 }
1512 memset(pDev->next_lun,0,sizeof(struct adpt_device));
1513 pDev = pDev->next_lun;
1514 }
1515 pDev->tid = tid;
1516 pDev->scsi_channel = bus_no;
1517 pDev->scsi_id = scsi_id;
1518 pDev->scsi_lun = scsi_lun;
1519 pDev->pI2o_dev = d;
1520 d->owner = pDev;
1521 pDev->type = (buf[0])&0xff;
1522 pDev->flags = (buf[0]>>8)&0xff;
1523 if(scsi_id > pHba->top_scsi_id){
1524 pHba->top_scsi_id = scsi_id;
1525 }
1526 if(scsi_lun > pHba->top_scsi_lun){
1527 pHba->top_scsi_lun = scsi_lun;
1528 }
1529 }
1530 if(scsi_id == -1){
1531 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1532 d->lct_data.identity_tag);
1533 }
1534 }
1535 }
1536 return 0;
1537}
1538
1539
1540/*
1541 * Each I2O controller has a chain of devices on it - these match
1542 * the useful parts of the LCT of the board.
1543 */
1544
1545static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1546{
1547 down(&adpt_configuration_lock);
1548 d->controller=pHba;
1549 d->owner=NULL;
1550 d->next=pHba->devices;
1551 d->prev=NULL;
1552 if (pHba->devices != NULL){
1553 pHba->devices->prev=d;
1554 }
1555 pHba->devices=d;
1556 *d->dev_name = 0;
1557
1558 up(&adpt_configuration_lock);
1559 return 0;
1560}
1561
1562static int adpt_open(struct inode *inode, struct file *file)
1563{
1564 int minor;
1565 adpt_hba* pHba;
1566
1567 //TODO check for root access
1568 //
1569 minor = iminor(inode);
1570 if (minor >= hba_count) {
1571 return -ENXIO;
1572 }
1573 down(&adpt_configuration_lock);
1574 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1575 if (pHba->unit == minor) {
1576 break; /* found adapter */
1577 }
1578 }
1579 if (pHba == NULL) {
1580 up(&adpt_configuration_lock);
1581 return -ENXIO;
1582 }
1583
1584// if(pHba->in_use){
1585 // up(&adpt_configuration_lock);
1586// return -EBUSY;
1587// }
1588
1589 pHba->in_use = 1;
1590 up(&adpt_configuration_lock);
1591
1592 return 0;
1593}
1594
1595static int adpt_close(struct inode *inode, struct file *file)
1596{
1597 int minor;
1598 adpt_hba* pHba;
1599
1600 minor = iminor(inode);
1601 if (minor >= hba_count) {
1602 return -ENXIO;
1603 }
1604 down(&adpt_configuration_lock);
1605 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1606 if (pHba->unit == minor) {
1607 break; /* found adapter */
1608 }
1609 }
1610 up(&adpt_configuration_lock);
1611 if (pHba == NULL) {
1612 return -ENXIO;
1613 }
1614
1615 pHba->in_use = 0;
1616
1617 return 0;
1618}
1619
1620
/*
 * I2OUSRCMD handler: execute a user-supplied I2O message frame.
 *
 * Copies the frame from user space, bounces every simple-SGL element
 * through a kernel buffer (copying user data in for out-bound
 * elements), posts the message via adpt_i2o_post_wait (the ISR copies
 * the reply frame into 'reply' through the 0x40000000 IOCTL context),
 * then copies in-bound SGL buffers and the reply back to user space.
 *
 * Returns 0 on success or a negative errno.  On -ETIME/-EINTR the
 * reply and SG bounce buffers are intentionally NOT freed -
 * presumably because the adapter may still write into them; confirm
 * against the completion path before changing this.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;	/* message size lives in the upper 16 bits */

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	memset(reply,0,REPLY_FRAME_SIZE*4);
	sg_offset = (msg[0]>>4)&0xf;	/* SGL offset in u32s, from the header */
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = (u32)reply;	/* ISR copies the reply frame here */
	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO 64bit fix
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		/* Replace each user SG address with a kernel bounce buffer. */
		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = (u32)virt_to_bus(p);
		}
	}

	/* Post and wait; retry for as long as the wait itself times out. */
	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from enterring the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops. We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		// (our in-kernel copy had its SG addresses rewritten above)
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* see function header: buffers are kept on -ETIME/-EINTR */
	if (rcode != -ETIME && rcode != -EINTR)
		kfree (reply);
	while(sg_index) {
		if(sg_list[--sg_index]) {
			if (rcode != -ETIME && rcode != -EINTR)
				kfree(sg_list[sg_index]);
		}
	}
	return rcode;
}
1798
1799
1800/*
1801 * This routine returns information about the system. This does not effect
1802 * any logic and if the info is wrong - it doesn't matter.
1803 */
1804
1805/* Get all the info we can not get from kernel services */
1806static int adpt_system_info(void __user *buffer)
1807{
1808 sysInfo_S si;
1809
1810 memset(&si, 0, sizeof(si));
1811
1812 si.osType = OS_LINUX;
Adrian Bunka4cd16e2005-06-25 14:59:01 -07001813 si.osMajorVersion = 0;
1814 si.osMinorVersion = 0;
1815 si.osRevision = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 si.busType = SI_PCI_BUS;
1817 si.processorFamily = DPTI_sig.dsProcessorFamily;
1818
1819#if defined __i386__
1820 adpt_i386_info(&si);
1821#elif defined (__ia64__)
1822 adpt_ia64_info(&si);
1823#elif defined(__sparc__)
1824 adpt_sparc_info(&si);
1825#elif defined (__alpha__)
1826 adpt_alpha_info(&si);
1827#else
1828 si.processorType = 0xff ;
1829#endif
1830 if(copy_to_user(buffer, &si, sizeof(si))){
1831 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1832 return -EFAULT;
1833 }
1834
1835 return 0;
1836}
1837
1838#if defined __ia64__
/* Report the IA-64 processor type for DPT_SYSINFO. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
1846#endif
1847
1848
1849#if defined __sparc__
/* Report the SPARC processor type for DPT_SYSINFO. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
1857#endif
1858
1859#if defined __alpha__
/* Report the Alpha processor type for DPT_SYSINFO. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
1867#endif
1868
1869#if defined __i386__
1870
/*
 * Report the x86 CPU family (386/486/Pentium) for DPT_SYSINFO.
 * Families newer than 586 are reported as Pentium.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
1891
1892#endif
1893
1894
/*
 * Chardev ioctl entry point for the DPT management interface.
 *
 * Resolves the minor number to an HBA, waits (sleeping in 2-jiffy
 * steps) while a reset is in progress, then dispatches the command:
 * DPT_SIGNATURE, I2OUSRCMD (message passthru), DPT_CTRLINFO,
 * DPT_SYSINFO, DPT_BLINKLED, I2ORESETCMD and I2ORESCANCMD.
 * Returns 0, a handler's return code, or a negative errno.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	down(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break; /* found adapter */
		}
	}
	up(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Don't touch the adapter while it is being reset. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET ) {
		set_task_state(current,TASK_UNINTERRUPTIBLE);
		schedule_timeout(2);

	}

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA 0x0002
#define FLG_OSD_I2O 0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
1981
1982
/*
 * Interrupt handler: drain the IOP's outbound (reply) FIFO while the
 * pending bit is set in the interrupt mask register.
 *
 * Failed frames (MSG_FAIL) have their original transaction context
 * recovered from the preserved MFA before the frame is retired with a
 * NOP.  Context bit 30 marks an ioctl reply (frame copied to the
 * waiter's buffer), bit 31 a post-wait reply (completed via
 * adpt_i2o_post_wait_complete); otherwise it is a SCSI command
 * completion handed to adpt_i2o_to_scsi().
 */
static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		reply = bus_to_virt(m);

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);	/* preserved MFA of the failed request */
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = (void *)readl(reply+12);
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = (struct scsi_cmnd*) readl(reply+12);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = (struct scsi_cmnd*) readl(reply+12);
			if(cmd != NULL){
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Hand the reply frame back to the IOP. */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2070
/*
 * Translate a scsi_cmnd into an I2O_CMD_SCSI_EXEC private message and
 * post it to the IOP.
 *
 * Builds the SCB flags from the DMA direction, embeds the CDB in a
 * fixed 16-byte slot, then appends either a scatter-gather list
 * (via pci_map_sg) or one single mapped buffer.  msg[3] carries the
 * scsi_cmnd pointer so the ISR can complete the command later.
 * Returns 0 on success or the adpt_i2o_post_this() error code.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	s32 rcode;

	memset(msg, 0 , sizeof(msg));
	len = cmd->request_bufflen;
	direction = 0x00000000;

	scsidir = 0x00000000; // DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir =0x40000000; // DATA IN (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000; // SGL OUT
			scsidir =0x80000000; // DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir =0x40000000; // DATA IN (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = (u32)cmd; /* We want the SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++; /* Remember me - fill in when we know */
	reqlen = 14; // SINGLE SGE
	/* Now fill in the SGList and command */
	if(cmd->use_sg) {
		struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
		int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
				cmd->sc_data_direction);


		/* One simple SGE (flags|length, then bus address) per segment. */
		len = 0;
		for(i = 0 ; i < sg_count; i++) {
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			*mptr++ = sg_dma_address(sg);
			sg++;
		}
		/* Make this an end of list */
		mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = cmd->request_bufflen;
		if(len == 0) {
			reqlen = 12;	/* no SGE at all */
		} else {
			*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
			*mptr++ = pci_map_single(pHba->pDev,
				cmd->request_buffer,
				cmd->request_bufflen,
				cmd->sc_data_direction);
		}
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2186
2187
2188static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2189{
2190 struct Scsi_Host *host = NULL;
2191
2192 host = scsi_register(sht, sizeof(adpt_hba*));
2193 if (host == NULL) {
2194 printk ("%s: scsi_register returned NULL\n",pHba->name);
2195 return -1;
2196 }
2197 host->hostdata[0] = (unsigned long)pHba;
2198 pHba->host = host;
2199
2200 host->irq = pHba->pDev->irq;
2201 /* no IO ports, so don't have to set host->io_port and
2202 * host->n_io_port
2203 */
2204 host->io_port = 0;
2205 host->n_io_port = 0;
2206 /* see comments in hosts.h */
2207 host->max_id = 16;
2208 host->max_lun = 256;
2209 host->max_channel = pHba->top_scsi_channel + 1;
2210 host->cmd_per_lun = 1;
2211 host->unique_id = (uint) pHba;
2212 host->sg_tablesize = pHba->sg_tablesize;
2213 host->can_queue = pHba->post_fifo_size;
2214
2215 return 0;
2216}
2217
2218
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002219static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220{
2221 adpt_hba* pHba;
2222 u32 hba_status;
2223 u32 dev_status;
2224 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2225 // I know this would look cleaner if I just read bytes
2226 // but the model I have been using for all the rest of the
2227 // io is in 4 byte words - so I keep that model
2228 u16 detailed_status = readl(reply+16) &0xffff;
2229 dev_status = (detailed_status & 0xff);
2230 hba_status = detailed_status >> 8;
2231
2232 // calculate resid for sg
2233 cmd->resid = cmd->request_bufflen - readl(reply+5);
2234
2235 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2236
2237 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2238
2239 if(!(reply_flags & MSG_FAIL)) {
2240 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2241 case I2O_SCSI_DSC_SUCCESS:
2242 cmd->result = (DID_OK << 16);
2243 // handle underflow
2244 if(readl(reply+5) < cmd->underflow ) {
2245 cmd->result = (DID_ERROR <<16);
2246 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2247 }
2248 break;
2249 case I2O_SCSI_DSC_REQUEST_ABORTED:
2250 cmd->result = (DID_ABORT << 16);
2251 break;
2252 case I2O_SCSI_DSC_PATH_INVALID:
2253 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2254 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2255 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2256 case I2O_SCSI_DSC_NO_ADAPTER:
2257 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2258 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2259 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2260 cmd->result = (DID_TIME_OUT << 16);
2261 break;
2262 case I2O_SCSI_DSC_ADAPTER_BUSY:
2263 case I2O_SCSI_DSC_BUS_BUSY:
2264 cmd->result = (DID_BUS_BUSY << 16);
2265 break;
2266 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2267 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2268 cmd->result = (DID_RESET << 16);
2269 break;
2270 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2271 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2272 cmd->result = (DID_PARITY << 16);
2273 break;
2274 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2275 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2276 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2277 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2278 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2279 case I2O_SCSI_DSC_DATA_OVERRUN:
2280 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2281 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2282 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2283 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2284 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2285 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2286 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2287 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2288 case I2O_SCSI_DSC_INVALID_CDB:
2289 case I2O_SCSI_DSC_LUN_INVALID:
2290 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2291 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2292 case I2O_SCSI_DSC_NO_NEXUS:
2293 case I2O_SCSI_DSC_CDB_RECEIVED:
2294 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2295 case I2O_SCSI_DSC_QUEUE_FROZEN:
2296 case I2O_SCSI_DSC_REQUEST_INVALID:
2297 default:
2298 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2299 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2300 hba_status, dev_status, cmd->cmnd[0]);
2301 cmd->result = (DID_ERROR << 16);
2302 break;
2303 }
2304
2305 // copy over the request sense data if it was a check
2306 // condition status
2307 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2308 u32 len = sizeof(cmd->sense_buffer);
2309 len = (len > 40) ? 40 : len;
2310 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002311 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2313 cmd->sense_buffer[2] == DATA_PROTECT ){
2314 /* This is to handle an array failed */
2315 cmd->result = (DID_TIME_OUT << 16);
2316 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2317 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2318 hba_status, dev_status, cmd->cmnd[0]);
2319
2320 }
2321 }
2322 } else {
2323 /* In this condtion we could not talk to the tid
2324 * the card rejected it. We should signal a retry
2325 * for a limitted number of retries.
2326 */
2327 cmd->result = (DID_TIME_OUT << 16);
2328 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2329 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2330 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2331 }
2332
2333 cmd->result |= (dev_status);
2334
2335 if(cmd->scsi_done != NULL){
2336 cmd->scsi_done(cmd);
2337 }
2338 return cmd->result;
2339}
2340
2341
2342static s32 adpt_rescan(adpt_hba* pHba)
2343{
2344 s32 rcode;
2345 ulong flags = 0;
2346
2347 if(pHba->host)
2348 spin_lock_irqsave(pHba->host->host_lock, flags);
2349 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2350 goto out;
2351 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2352 goto out;
2353 rcode = 0;
2354out: if(pHba->host)
2355 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2356 return rcode;
2357}
2358
2359
/*
 * Walk the freshly fetched LCT (pHba->lct) and reconcile it with the
 * driver's device list: add new SCSI-class devices, refresh TIDs of
 * known ones, and mark devices that vanished from the table offline.
 * Returns 0 on success, -1 if no LCT is present, -ENOMEM on allocation
 * failure (possibly leaving the list partially updated).
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in 32-bit words: 3 words of header, 9 per entry */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned; anything still marked at the end
	// was not found in the new LCT and gets taken offline below.
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* 0xfff = unclaimed; entries owned by another user are skipped */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* group 0x8000, whole group: device identity scalars */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			/* NOTE(review): bus_no indexes channel[] here but is only
			 * range-checked against MAX_CHANNEL further down (new-device
			 * path) - a firmware-supplied out-of-range value would read
			 * out of bounds.  Confirm and hoist the check. */
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
					continue;
				}
				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					/* first LUN on this (channel, id) */
					pDev =  kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* append to the per-target LUN chain */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				memset(pDev,0,sizeof(struct adpt_device));
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							/* tell the midlayer the medium may differ */
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2518
/*
 * Complete every command currently outstanding on this host with
 * QUEUE_FULL so the midlayer will retry them - used after the firmware
 * has been reset and all posted SCBs are lost.
 */
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* 	cmd = NULL;
	struct scsi_device* 	d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		/* walk this device's command list under its list lock */
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			/* serial_number == 0 means the command is not active */
			if(cmd->serial_number == 0){
				continue;
			}
			/* QUEUE_FULL is a SAM status; the result status byte is
			 * stored shifted right by one, hence the <<1 here */
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			/* NOTE(review): scsi_done is called while holding
			 * d->list_lock - confirm the completion path tolerates it */
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}
2537
2538
2539/*============================================================================
2540 * Routines from i2o subsystem
2541 *============================================================================
2542 */
2543
2544
2545
2546/*
2547 * Bring an I2O controller into HOLD state. See the spec.
2548 */
/*
 * Bring the IOP into HOLD state per the I2O spec: reset it if needed,
 * verify it lands in RESET, then initialize the outbound queue and
 * fetch the hardware resource table.  Returns 0 once the controller is
 * in HOLD, or a negative/reset error code on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Previously initialized: probe current state first */
		if (adpt_i2o_status_get(pHba) < 0) {
			/* not answering - try one reset, then re-probe */
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any already-running (or failed) state must be reset back to
		 * RESET before we can walk it up to HOLD again */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: unconditional reset */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2600
2601/*
2602 * Bring a controller online into OPERATIONAL state.
2603 */
2604
2605static int adpt_i2o_online_hba(adpt_hba* pHba)
2606{
2607 if (adpt_i2o_systab_send(pHba) < 0) {
2608 adpt_i2o_delete_hba(pHba);
2609 return -1;
2610 }
2611 /* In READY state */
2612
2613 if (adpt_i2o_enable_hba(pHba) < 0) {
2614 adpt_i2o_delete_hba(pHba);
2615 return -1;
2616 }
2617
2618 /* In OPERATIONAL state */
2619 return 0;
2620}
2621
/*
 * Post a UtilNOP message to the IOP, used to hand back a message frame
 * we claimed but cannot use.  If m is EMPTY_QUEUE a frame is first
 * acquired from the post FIFO (polling up to 5 seconds).  Returns 0 on
 * success, 2 on timeout.  Sleeps, so process context only.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	/* Claim an inbound frame if the caller didn't supply one */
	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}
	/* m is the frame's byte offset into the mapped message region */
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();	/* frame contents must be visible before the post below */

	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2650
2651static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2652{
2653 u8 *status;
2654 u32 __iomem *msg = NULL;
2655 int i;
2656 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2657 u32* ptr;
2658 u32 outbound_frame; // This had to be a 32 bit address
2659 u32 m;
2660
2661 do {
2662 rmb();
2663 m = readl(pHba->post_port);
2664 if (m != EMPTY_QUEUE) {
2665 break;
2666 }
2667
2668 if(time_after(jiffies,timeout)){
2669 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2670 return -ETIMEDOUT;
2671 }
2672 set_current_state(TASK_UNINTERRUPTIBLE);
2673 schedule_timeout(1);
2674 } while(m == EMPTY_QUEUE);
2675
2676 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2677
2678 status = kmalloc(4,GFP_KERNEL|ADDR32);
2679 if (status==NULL) {
2680 adpt_send_nop(pHba, m);
2681 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2682 pHba->name);
2683 return -ENOMEM;
2684 }
2685 memset(status, 0, 4);
2686
2687 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2688 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2689 writel(0, &msg[2]);
2690 writel(0x0106, &msg[3]); /* Transaction context */
2691 writel(4096, &msg[4]); /* Host page frame size */
2692 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2693 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2694 writel(virt_to_bus(status), &msg[7]);
2695
2696 writel(m, pHba->post_port);
2697 wmb();
2698
2699 // Wait for the reply status to come back
2700 do {
2701 if (*status) {
2702 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2703 break;
2704 }
2705 }
2706 rmb();
2707 if(time_after(jiffies,timeout)){
2708 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2709 return -ETIMEDOUT;
2710 }
2711 set_current_state(TASK_UNINTERRUPTIBLE);
2712 schedule_timeout(1);
2713 } while (1);
2714
2715 // If the command was successful, fill the fifo with our reply
2716 // message packets
2717 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2718 kfree((void*)status);
2719 return -2;
2720 }
2721 kfree((void*)status);
2722
2723 if(pHba->reply_pool != NULL){
2724 kfree(pHba->reply_pool);
2725 }
2726
2727 pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2728 if(!pHba->reply_pool){
2729 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2730 return -1;
2731 }
2732 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2733
2734 ptr = pHba->reply_pool;
2735 for(i = 0; i < pHba->reply_fifo_size; i++) {
2736 outbound_frame = (u32)virt_to_bus(ptr);
2737 writel(outbound_frame, pHba->reply_port);
2738 wmb();
2739 ptr += REPLY_FRAME_SIZE;
2740 }
2741 adpt_i2o_status_get(pHba);
2742 return 0;
2743}
2744
2745
2746/*
2747 * I2O System Table. Contains information about
2748 * all the IOPs in the system. Used to inform IOPs
2749 * about each other's existence.
2750 *
2751 * sys_tbl_ver is the CurrentChangeIndicator that is
2752 * used by IOPs to track changes.
2753 */
2754
2755
2756
/*
 * Issue ExecStatusGet and poll until the IOP has DMA'd its status block
 * into pHba->status_block, then derive the driver's FIFO depths and SG
 * table size from it.  Returns 0 on success, -ENOMEM or -ETIMEDOUT on
 * failure.  Sleeps, so process context only.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;
	ulong status_block_bus;

	/* Allocate once and reuse; ADDR32 keeps it DMA-reachable for
	 * 32-bit-only controllers */
	if(pHba->status_block == NULL) {
		pHba->status_block = (i2o_status_block*)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	status_block_bus = virt_to_bus(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame from the post FIFO */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);	/* DMA target, low 32 bits */
	writel(0, &msg[7]);					/* high 32 bits unused */
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* The controller writes the block back to front; byte 87 becoming
	 * 0xff signals the whole block has landed */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	/* frame size is in 32-bit words; 40 bytes are presumably the
	 * SCSI_EXEC message header preceding the SG list - TODO confirm */
	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2870
2871/*
2872 * Get the IOP's Logical Configuration Table
2873 */
/*
 * Fetch the IOP's Logical Configuration Table into pHba->lct, growing
 * the buffer and retrying if the controller reports a larger table
 * than we allocated.  Also reads the firmware debug buffer layout.
 * Returns 0 on success, -ENOMEM or the post_wait error on failure.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	/* Start from the size the status block advertised */
	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;	/* simple SG element */
		msg[7] = virt_to_bus(pHba->lct);

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", 
				pHba->name, ret);	
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/* table_size is in 32-bit words; grow and retry if short */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			kfree(pHba->lct);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	/* Locate the firmware debug buffer inside the mapped BAR */
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P    = pHba->base_addr_virt + buf[0];
		pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
		pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
		pHba->FwDebugBuffer_P += buf[2]; 
		pHba->FwDebugFlags = 0;
	}

	return 0;
}
2934
2935static int adpt_i2o_build_sys_table(void)
2936{
2937 adpt_hba* pHba = NULL;
2938 int count = 0;
2939
2940 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2941 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2942
2943 if(sys_tbl)
2944 kfree(sys_tbl);
2945
2946 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2947 if(!sys_tbl) {
2948 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2949 return -ENOMEM;
2950 }
2951 memset(sys_tbl, 0, sys_tbl_len);
2952
2953 sys_tbl->num_entries = hba_count;
2954 sys_tbl->version = I2OVERSION;
2955 sys_tbl->change_ind = sys_tbl_ind++;
2956
2957 for(pHba = hba_chain; pHba; pHba = pHba->next) {
2958 // Get updated Status Block so we have the latest information
2959 if (adpt_i2o_status_get(pHba)) {
2960 sys_tbl->num_entries--;
2961 continue; // try next one
2962 }
2963
2964 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2965 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2966 sys_tbl->iops[count].seg_num = 0;
2967 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2968 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2969 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2970 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2971 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2972 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002973 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2974 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975
2976 count++;
2977 }
2978
2979#ifdef DEBUG
2980{
2981 u32 *table = (u32*)sys_tbl;
2982 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2983 for(count = 0; count < (sys_tbl_len >>2); count++) {
2984 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2985 count, table[count]);
2986 }
2987}
2988#endif
2989
2990 return 0;
2991}
2992
2993
2994/*
2995 * Dump the information block associated with a given unit (TID)
2996 */
2997
2998static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2999{
3000 char buf[64];
3001 int unit = d->lct_data.tid;
3002
3003 printk(KERN_INFO "TID %3.3d ", unit);
3004
3005 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3006 {
3007 buf[16]=0;
3008 printk(" Vendor: %-12.12s", buf);
3009 }
3010 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3011 {
3012 buf[16]=0;
3013 printk(" Device: %-12.12s", buf);
3014 }
3015 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3016 {
3017 buf[8]=0;
3018 printk(" Rev: %-12.12s\n", buf);
3019 }
3020#ifdef DEBUG
3021 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3022 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3023 printk(KERN_INFO "\tFlags: ");
3024
3025 if(d->lct_data.device_flags&(1<<0))
3026 printk("C"); // ConfigDialog requested
3027 if(d->lct_data.device_flags&(1<<1))
3028 printk("U"); // Multi-user capable
3029 if(!(d->lct_data.device_flags&(1<<4)))
3030 printk("P"); // Peer service enabled!
3031 if(!(d->lct_data.device_flags&(1<<5)))
3032 printk("M"); // Mgmt service enabled!
3033 printk("\n");
3034#endif
3035}
3036
#ifdef DEBUG
/*
 * Map an I2O class code (low 12 bits) to a human-readable name for
 * debug logging; unrecognized codes map to "Unknown".
 */
static const char *adpt_i2o_get_class_name(int class)
{
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch (class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		return i2o_class_name[0];
	case I2O_CLASS_DDM:
		return i2o_class_name[1];
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		return i2o_class_name[2];
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		return i2o_class_name[3];
	case I2O_CLASS_LAN:
		return i2o_class_name[4];
	case I2O_CLASS_WAN:
		return i2o_class_name[5];
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		return i2o_class_name[6];
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		return i2o_class_name[7];
	case I2O_CLASS_SCSI_PERIPHERAL:
		return i2o_class_name[8];
	case I2O_CLASS_ATE_PORT:
		return i2o_class_name[9];
	case I2O_CLASS_ATE_PERIPHERAL:
		return i2o_class_name[10];
	case I2O_CLASS_FLOPPY_CONTROLLER:
		return i2o_class_name[11];
	case I2O_CLASS_FLOPPY_DEVICE:
		return i2o_class_name[12];
	case I2O_CLASS_BUS_ADAPTER_PORT:
		return i2o_class_name[13];
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		return i2o_class_name[14];
	case I2O_CLASS_PEER_TRANSPORT:
		return i2o_class_name[15];
	default:
		return i2o_class_name[16];
	}
}
#endif
3101
3102
/*
 * Fetch the IOP's Hardware Resource Table into pHba->hrt, growing the
 * buffer and retrying when the controller reports a larger table than
 * we allocated.  Returns 0 on success, -ENOMEM or the post_wait error.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* entry_len is in 32-bit words; grow and retry if short */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			kfree(pHba->hrt);
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3137
3138/*
3139 * Query one scalar group value or a whole scalar group.
3140 */
3141static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3142 int group, int field, void *buf, int buflen)
3143{
3144 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3145 u8 *resblk;
3146
3147 int size;
3148
3149 /* 8 bytes for header */
3150 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3151 if (resblk == NULL) {
3152 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3153 return -ENOMEM;
3154 }
3155
3156 if (field == -1) /* whole group */
3157 opblk[4] = -1;
3158
3159 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3160 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3161 if (size == -ETIME) {
3162 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3163 return -ETIME;
3164 } else if (size == -EINTR) {
3165 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3166 return -EINTR;
3167 }
3168
3169 memcpy(buf, resblk+8, buflen); /* cut off header */
3170
3171 kfree(resblk);
3172 if (size < 0)
3173 return size;
3174
3175 return buflen;
3176}
3177
3178
3179/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3180 *
3181 * This function can be used for all UtilParamsGet/Set operations.
3182 * The OperationBlock is given in opblk-buffer,
3183 * and results are returned in resblk-buffer.
3184 * Note that the minimum sized resblk is 8 bytes and contains
3185 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3186 */
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 *
 * Returns the number of bytes used in resblk on success, or a negative
 * value: the post_wait error, or -BlockStatus if the IOP rejected the
 * operation.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
		  void *opblk, int oplen, void *resblk, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = virt_to_bus(opblk);
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = virt_to_bus(resblk);

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
   		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
		return wait_status; 	/* -DetailedStatus */
	}

	/* res[1]: ErrorInfoSize (31:24), BlockStatus (23:16), BlockSize (15:0) */
	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",   
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	/* BlockSize is in 32-bit words; add the 4-byte result header */
	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
}
3221
3222
3223static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3224{
3225 u32 msg[4];
3226 int ret;
3227
3228 adpt_i2o_status_get(pHba);
3229
3230 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3231
3232 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3233 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3234 return 0;
3235 }
3236
3237 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3238 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3239 msg[2] = 0;
3240 msg[3] = 0;
3241
3242 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3243 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3244 pHba->unit, -ret);
3245 } else {
3246 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3247 }
3248
3249 adpt_i2o_status_get(pHba);
3250 return ret;
3251}
3252
3253
3254/*
3255 * Enable IOP. Allows the IOP to resume external operations.
3256 */
3257static int adpt_i2o_enable_hba(adpt_hba* pHba)
3258{
3259 u32 msg[4];
3260 int ret;
3261
3262 adpt_i2o_status_get(pHba);
3263 if(!pHba->status_block){
3264 return -ENOMEM;
3265 }
3266 /* Enable only allowed on READY state */
3267 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3268 return 0;
3269
3270 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3271 return -EINVAL;
3272
3273 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3274 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3275 msg[2]= 0;
3276 msg[3]= 0;
3277
3278 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3279 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3280 pHba->name, ret);
3281 } else {
3282 PDEBUG("%s: Enabled.\n", pHba->name);
3283 }
3284
3285 adpt_i2o_status_get(pHba);
3286 return ret;
3287}
3288
3289
3290static int adpt_i2o_systab_send(adpt_hba* pHba)
3291{
3292 u32 msg[12];
3293 int ret;
3294
3295 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3296 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3297 msg[2] = 0;
3298 msg[3] = 0;
3299 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3300 msg[5] = 0; /* Segment 0 */
3301
3302 /*
3303 * Provide three SGL-elements:
3304 * System table (SysTab), Private memory space declaration and
3305 * Private i/o space declaration
3306 */
3307 msg[6] = 0x54000000 | sys_tbl_len;
3308 msg[7] = virt_to_phys(sys_tbl);
3309 msg[8] = 0x54000000 | 0;
3310 msg[9] = 0;
3311 msg[10] = 0xD4000000 | 0;
3312 msg[11] = 0;
3313
3314 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3315 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3316 pHba->name, ret);
3317 }
3318#ifdef DEBUG
3319 else {
3320 PINFO("%s: SysTab set.\n", pHba->name);
3321 }
3322#endif
3323
3324 return ret;
3325 }
3326
3327
3328/*============================================================================
3329 *
3330 *============================================================================
3331 */
3332
3333
3334#ifdef UARTDELAY
3335
/* Busy-wait for @millisec milliseconds, one udelay(1000) at a time.
 * Fix: the original declared this "static static", which is a C
 * constraint violation (at most one storage-class specifier per
 * declaration) and only went unnoticed because UARTDELAY is never
 * defined. */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3343
3344#endif
3345
/* SCSI mid-layer host template for the dpt_i2o driver: wires the
 * adpt_* entry points (detect/release, queuecommand, the four
 * error-handler levels, BIOS geometry and slave configuration) into
 * the generic SCSI host registration done by scsi_module.c below. */
static struct scsi_host_template driver_template = {
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.detect			= adpt_detect,
	.release		= adpt_release,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	/* one outstanding command per inbound IOP message frame */
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
3365#include "scsi_module.c"
3366MODULE_LICENSE("GPL");