#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/i8259.h>

static volatile unsigned char *pci_intack; /* RO, gives us the irq vector */

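/*
 * Cached copies of the slave (0xA1) and master (0x21) OCW1 mask
 * registers, initially fully masked; mask/unmask update the cached
 * value and write it back, so the hardware mask is never read.
 */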
unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

static DEFINE_SPINLOCK(i8259_lock);

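/*
 * Offset of the 8259 interrupts within the Linux irq number space;
 * the handlers below subtract it to recover the raw 0-15 line.
 */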
int i8259_pic_irq_offset;

/*
 * Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or by polling.  Which method is used depends on
 * how i8259_init() was called.  Note that polling is broken on some
 * IBM and Motorola PReP boxes, so we must use the int-ack feature
 * on them.
 */
int
i8259_irq(struct pt_regs *regs)
{
	int irq;

	spin_lock(&i8259_lock);

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = *pci_intack;
	else {
		/* Perform an interrupt acknowledge cycle on controller 1. */
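		/*
		 * OCW3 poll command: the read that follows returns the
		 * highest-priority pending line in bits 0-2.
		 */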
		outb(0x0C, 0x20);		/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
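		/*
		 * In poll mode re-select the ISR here (the poll sequence
		 * above used OCW3 as well); the int-ack path relies on
		 * i8259_init() having left OCW3 selecting the ISR.
		 */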
		if (!pci_intack)
			outb(0x0B, 0x20);	/* ISR register */
		if (~inb(0x20) & 0x80)
			irq = -1;
	}

	spin_unlock(&i8259_lock);
	return irq;
}

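/*
 * Mask the line in the cached OCW1 mask and acknowledge it with a
 * non-specific EOI; a line on the slave needs an EOI on both the
 * slave and the master (cascade).
 */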
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
	unsigned long flags;

	spin_lock_irqsave(&i8259_lock, flags);
	if (irq_nr >= i8259_pic_irq_offset)
		irq_nr -= i8259_pic_irq_offset;

	if (irq_nr > 7) {
		cached_A1 |= 1 << (irq_nr - 8);
		inb(0xA1); /* DUMMY */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0); /* Non-specific EOI */
		outb(0x20, 0x20); /* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << irq_nr;
		inb(0x21); /* DUMMY */
		outb(cached_21, 0x21);
		outb(0x20, 0x20); /* Non-specific EOI */
	}
	spin_unlock_irqrestore(&i8259_lock, flags);
}

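/* Write both cached masks back to the PICs (irq_nr itself is unused). */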
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);
}

static void i8259_mask_irq(unsigned int irq_nr)
{
	unsigned long flags;

	spin_lock_irqsave(&i8259_lock, flags);
	if (irq_nr >= i8259_pic_irq_offset)
		irq_nr -= i8259_pic_irq_offset;
	if (irq_nr < 8)
		cached_21 |= 1 << irq_nr;
	else
		cached_A1 |= 1 << (irq_nr - 8);
	i8259_set_irq_mask(irq_nr);
	spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_unmask_irq(unsigned int irq_nr)
{
	unsigned long flags;

	spin_lock_irqsave(&i8259_lock, flags);
	if (irq_nr >= i8259_pic_irq_offset)
		irq_nr -= i8259_pic_irq_offset;
	if (irq_nr < 8)
		cached_21 &= ~(1 << irq_nr);
	else
		cached_A1 &= ~(1 << (irq_nr - 8));
	i8259_set_irq_mask(irq_nr);
	spin_unlock_irqrestore(&i8259_lock, flags);
}

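/*
 * End of interrupt: re-enable the line unless it has been disabled or
 * is still marked in progress, and only if a handler is installed.
 */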
static void i8259_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))
	    && irq_desc[irq].action)
		i8259_unmask_irq(irq);
}

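/*
 * Positional hw_interrupt_type initializer: typename, startup,
 * shutdown, enable, disable, ack, end, set_affinity.
 */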
struct hw_interrupt_type i8259_pic = {
	" i8259    ",
	NULL,
	NULL,
	i8259_unmask_irq,
	i8259_mask_irq,
	i8259_mask_and_ack_irq,
	i8259_end_irq,
	NULL
};

static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_BUSY,
};

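/* ELCR: edge/level trigger control registers for the two 8259s */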
static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_BUSY,
};

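/*
 * Dummy action for the cascade line (master IRQ2) so that it always
 * has a handler registered; see the setup_irq() call in i8259_init().
 */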
static struct irqaction i8259_irqaction = {
	.handler = no_action,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "82c59 secondary cascade",
};

/*
 * i8259_init()
 * intack_addr - physical address of the PCI interrupt acknowledge
 *               register which returns the active irq from the 8259,
 *               or 0 to fall back to polling the controller directly
 */
void __init
i8259_init(long intack_addr)
{
	unsigned long flags;

	spin_lock_irqsave(&i8259_lock, flags);
	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* Cascaded on master's IRQ2 (slave ID 2) */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Mask all interrupts */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	spin_unlock_irqrestore(&i8259_lock, flags);

	/* reserve our resources */
	setup_irq(i8259_pic_irq_offset + 2, &i8259_irqaction);
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);
}