Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1 | /* Copyright (c) 2011, Code Aurora Forum. All rights reserved. |
| 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | */ |
| 12 | #include <linux/bitops.h> |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> |
| 15 | #include <linux/irq.h> |
| 16 | #include <linux/mfd/core.h> |
| 17 | #include <linux/mfd/wcd9310/core.h> |
| 18 | #include <linux/mfd/wcd9310/registers.h> |
| 19 | #include <linux/interrupt.h> |
| 20 | |
/* Map a codec interrupt index onto the byte-wide hardware registers:
 * BIT_BYTE(nr) selects which 8-bit status/mask register holds bit nr,
 * BYTE_BIT_MASK(nr) is the bit's mask within that register.
 */
#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)

/* Per-interrupt static configuration: trigger type only. */
struct tabla_irq {
	bool level;	/* true = level-triggered, false = edge-triggered */
};

/* Trigger table, indexed by codec interrupt number.
 * Only interrupt 0 is level-triggered; the designated initializer
 * leaves every other entry zeroed (edge-triggered).
 */
static struct tabla_irq tabla_irqs[TABLA_NUM_IRQS] = {
	[0] = { .level = 1},
	/* All other tabla interrupts are edge triggered */
};
| 32 | |
| 33 | static inline int irq_to_tabla_irq(struct tabla *tabla, int irq) |
| 34 | { |
| 35 | return irq - tabla->irq_base; |
| 36 | } |
| 37 | |
/* irq_bus_lock callback: serialize mask updates against the slow
 * (sleeping) codec bus.  Taken before the genirq core calls the
 * enable/disable callbacks; released by tabla_irq_sync_unlock().
 */
static void tabla_irq_lock(struct irq_data *data)
{
	struct tabla *tabla = irq_data_get_irq_chip_data(data);
	mutex_lock(&tabla->irq_lock);
}
| 43 | |
| 44 | static void tabla_irq_sync_unlock(struct irq_data *data) |
| 45 | { |
| 46 | struct tabla *tabla = irq_data_get_irq_chip_data(data); |
| 47 | int i; |
| 48 | |
| 49 | for (i = 0; i < ARRAY_SIZE(tabla->irq_masks_cur); i++) { |
| 50 | /* If there's been a change in the mask write it back |
| 51 | * to the hardware. |
| 52 | */ |
| 53 | if (tabla->irq_masks_cur[i] != tabla->irq_masks_cache[i]) { |
| 54 | tabla->irq_masks_cache[i] = tabla->irq_masks_cur[i]; |
| 55 | tabla_reg_write(tabla, TABLA_A_INTR_MASK0+i, |
| 56 | tabla->irq_masks_cur[i]); |
| 57 | } |
| 58 | } |
| 59 | |
| 60 | mutex_unlock(&tabla->irq_lock); |
| 61 | } |
| 62 | |
| 63 | static void tabla_irq_enable(struct irq_data *data) |
| 64 | { |
| 65 | struct tabla *tabla = irq_data_get_irq_chip_data(data); |
| 66 | int tabla_irq = irq_to_tabla_irq(tabla, data->irq); |
| 67 | tabla->irq_masks_cur[BIT_BYTE(tabla_irq)] &= |
| 68 | ~(BYTE_BIT_MASK(tabla_irq)); |
| 69 | } |
| 70 | |
| 71 | static void tabla_irq_disable(struct irq_data *data) |
| 72 | { |
| 73 | struct tabla *tabla = irq_data_get_irq_chip_data(data); |
| 74 | int tabla_irq = irq_to_tabla_irq(tabla, data->irq); |
| 75 | tabla->irq_masks_cur[BIT_BYTE(tabla_irq)] |= BYTE_BIT_MASK(tabla_irq); |
| 76 | } |
| 77 | |
/* irq_chip for the codec's nested interrupts.  Mask changes are cached
 * under irq_bus_lock and written back on irq_bus_sync_unlock, since the
 * codec registers live behind a sleeping bus and cannot be touched from
 * the genirq fast paths directly.  No irq_ack/irq_mask handlers: status
 * bits are cleared explicitly in tabla_irq_thread().
 */
static struct irq_chip tabla_irq_chip = {
	.name = "tabla",
	.irq_bus_lock = tabla_irq_lock,
	.irq_bus_sync_unlock = tabla_irq_sync_unlock,
	.irq_disable = tabla_irq_disable,
	.irq_enable = tabla_irq_enable,
};
| 85 | |
/* Threaded handler for the chip-level (summary) interrupt line.
 *
 * Reads all interrupt status registers in one bulk transfer, discards
 * sources that are masked in the shadow mask, then dispatches the first
 * pending interrupt to its nested handler and clears its status bit.
 *
 * Returns IRQ_HANDLED after dispatching (or finding nothing pending),
 * IRQ_NONE if the status registers could not be read.
 */
static irqreturn_t tabla_irq_thread(int irq, void *data)
{
	int ret;
	struct tabla *tabla = data;
	u8 status[TABLA_NUM_IRQ_REGS];
	unsigned int i;

	ret = tabla_bulk_read(tabla, TABLA_A_INTR_STATUS0,
	TABLA_NUM_IRQ_REGS, status);
	if (ret < 0) {
		dev_err(tabla->dev, "Failed to read interrupt status: %d\n",
		ret);
		return IRQ_NONE;
	}
	/* Apply masking: a set bit in irq_masks_cur means "disabled",
	 * so drop those sources from the pending set.
	 */
	for (i = 0; i < TABLA_NUM_IRQ_REGS; i++)
		status[i] &= ~tabla->irq_masks_cur[i];

	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function
	 */
	for (i = 0; i < TABLA_NUM_IRQS; i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i)) {
			/* The clear-vs-dispatch order differs for the MBHC
			 * (headset detect) range: clear the status bit BEFORE
			 * running the handler, everything else clears AFTER.
			 * NOTE(review): the range test assumes the enum orders
			 * TABLA_IRQ_MBHC_REMOVAL <= TABLA_IRQ_MBHC_INSERTION —
			 * confirm against the registers header.
			 */
			if ((i <= TABLA_IRQ_MBHC_INSERTION) &&
			(i >= TABLA_IRQ_MBHC_REMOVAL)) {
				tabla_reg_write(tabla, TABLA_A_INTR_CLEAR0 +
				BIT_BYTE(i), BYTE_BIT_MASK(i));
				handle_nested_irq(tabla->irq_base + i);
			} else {
				handle_nested_irq(tabla->irq_base + i);
				tabla_reg_write(tabla, TABLA_A_INTR_CLEAR0 +
				BIT_BYTE(i), BYTE_BIT_MASK(i));
			}
			/* Only the first pending source is serviced per
			 * invocation.  NOTE(review): remaining pending bits are
			 * dropped here; presumably the level-triggered summary
			 * line (requested with IRQF_TRIGGER_HIGH in
			 * tabla_irq_init) re-fires for them — verify.
			 */
			break;
		}
	}

	return IRQ_HANDLED;
}
| 125 | |
/* Set up interrupt handling for the codec.
 *
 * Registers one nested irq_chip entry per codec interrupt starting at
 * tabla->irq_base, programs the hardware level and mask registers with
 * everything masked, and requests the chip-level summary IRQ with a
 * threaded handler (tabla_irq_thread).
 *
 * Returns 0 on success or when no IRQ is configured (interrupts simply
 * disabled); returns the request_threaded_irq() error otherwise.
 */
int tabla_irq_init(struct tabla *tabla)
{
	int ret;
	unsigned int i, cur_irq;

	mutex_init(&tabla->irq_lock);

	/* No summary IRQ wired up: run without interrupts. */
	if (!tabla->irq) {
		dev_warn(tabla->dev,
		"No interrupt specified, no interrupts\n");
		tabla->irq_base = 0;
		return 0;
	}

	/* NOTE(review): this path logs at dev_err severity yet still
	 * returns success, unlike the dev_warn path above — confirm
	 * whether callers expect an error code (-EINVAL) here.
	 */
	if (!tabla->irq_base) {
		dev_err(tabla->dev,
		"No interrupt base specified, no interrupts\n");
		return 0;
	}
	/* Mask the individual interrupt sources */
	for (i = 0, cur_irq = tabla->irq_base; i < TABLA_NUM_IRQS; i++,
	cur_irq++) {

		irq_set_chip_data(cur_irq, tabla);

		/* Flow handler per the static trigger table. */
		if (tabla_irqs[i].level)
			irq_set_chip_and_handler(cur_irq, &tabla_irq_chip,
			handle_level_irq);
		else
			irq_set_chip_and_handler(cur_irq, &tabla_irq_chip,
			handle_edge_irq);

		/* Handlers run from the summary IRQ's thread context. */
		irq_set_nested_thread(cur_irq, 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		set_irq_noprobe(cur_irq);
#endif

		/* Start fully masked; record each source's trigger level
		 * for the hardware LEVEL registers below.
		 */
		tabla->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		tabla->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		tabla->irq_level[BIT_BYTE(i)] |= tabla_irqs[i].level <<
		(i % BITS_PER_BYTE);
	}
	for (i = 0; i < TABLA_NUM_IRQ_REGS; i++) {
		/* Initialize interrupt mask and level registers */
		tabla_reg_write(tabla, TABLA_A_INTR_LEVEL0 + i,
		tabla->irq_level[i]);
		tabla_reg_write(tabla, TABLA_A_INTR_MASK0 + i,
		tabla->irq_masks_cur[i]);
	}

	/* Summary line: level-high, oneshot so the line stays disabled
	 * until the threaded handler finishes.
	 */
	ret = request_threaded_irq(tabla->irq, NULL, tabla_irq_thread,
	IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
	"tabla", tabla);

	if (ret != 0) {
		dev_err(tabla->dev, "Failed to request IRQ %d: %d\n",
		tabla->irq, ret);
		return ret;
	}
	return 0;
}
| 192 | void tabla_irq_exit(struct tabla *tabla) |
| 193 | { |
| 194 | if (tabla->irq) |
| 195 | free_irq(tabla->irq, tabla); |
| 196 | mutex_destroy(&tabla->irq_lock); |
| 197 | } |