/*
 * IRQ chip definitions for INTC IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/cpumask.h>
#include <linux/io.h>
#include "internals.h"

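/*
 * Enable the IRQ described by @handle. The handle packs the register mode,
 * access function and register index (_INTC_MODE/_INTC_FN/_INTC_ADDR_E);
 * on SMP the per-CPU register copies belonging to CPUs outside the IRQ's
 * affinity mask are skipped.
 */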
void _intc_enable(struct irq_data *data, unsigned long handle)
{
	unsigned int irq = data->irq;
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, data->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}

static void intc_enable(struct irq_data *data)
{
	_intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
}

static void intc_disable(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, data->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}

static int intc_set_wake(struct irq_data *data, unsigned int on)
{
	return 0; /* allow wakeup, but set up hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(struct irq_data *data,
			     const struct cpumask *cpumask,
			     bool force)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(data->affinity, cpumask);

	return 0;
}
#endif

static void intc_mask_ack(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = intc_get_ack_handle(irq);
	unsigned long addr;

	intc_disable(data);

	/* read register and write zero only to the associated bit */
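	/*
	 * Worked example (hedged, assuming a single-bit field at bit 3 of
	 * an 8-bit register): value becomes 0x08, so 0xf7 is written back,
	 * clearing only this IRQ's pending bit; per the comment above,
	 * writing 1 to the remaining bits leaves them untouched.
	 */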
	if (handle) {
		unsigned int value;

		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		value = intc_set_field_from_handle(0, 1, handle);

		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ value, addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ value, addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ value, addr);
			break;
		default:
			BUG();
			break;
		}
	}
}

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
					     unsigned int nr_hp,
					     unsigned int irq)
{
	int i;

	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_type()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to look up the irq (a sketch of
	 * that approach follows this function).
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
			continue;

		return hp + i;
	}

	return NULL;
}
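/*
 * Hedged sketch of the bisect idea mentioned above: if the handle arrays
 * (d->prio, d->sense) were kept sorted by ->irq at registration time,
 * which the current code does not guarantee, the linear scan could be
 * replaced with a binary search along these lines. Illustrative only;
 * nothing in the driver calls this.
 */
static struct intc_handle_int * __maybe_unused
intc_find_irq_bisect(struct intc_handle_int *hp, unsigned int nr_hp,
		     unsigned int irq)
{
	unsigned int lo = 0, hi = nr_hp;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (hp[mid].irq == irq)
			return hp + mid;

		if (hp[mid].irq < irq)
			lo = mid + 1;
		else
			hi = mid;
	}

	return NULL;
}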

int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct irq_data *data = irq_get_irq_data(irq);
	struct intc_handle_int *ihp;

	if (!intc_get_prio_level(irq) || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_set_prio_level(irq, prio);

		/*
		 * Only set the secondary masking method directly; the
		 * primary masking method uses intc_prio_level[irq].
		 * The priority level will be set during the next enable().
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(data, ihp->handle);
	}
	return 0;
}

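/*
 * Bit 7 marks a table entry as populated: a zero entry in
 * intc_irq_sense_table means the sense type is unsupported and
 * intc_set_type() rejects it with -EINVAL. The low bits hold the value
 * programmed into the sense register (the valid bit is presumably masked
 * off by the register field width when written).
 */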
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high-level triggering */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_type(struct irq_data *data, unsigned int type)
{
	unsigned int irq = data->irq;
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
	}

	return 0;
}

struct irq_chip intc_irq_chip	= {
	.irq_mask		= intc_disable,
	.irq_unmask		= intc_enable,
	.irq_mask_ack		= intc_mask_ack,
	.irq_enable		= intc_enable,
	.irq_disable		= intc_disable,
	.irq_shutdown		= intc_disable,
	.irq_set_type		= intc_set_type,
	.irq_set_wake		= intc_set_wake,
#ifdef CONFIG_SMP
	.irq_set_affinity	= intc_set_affinity,
#endif
};