| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1 | /* | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 2 | * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 3 | * | 
|  | 4 | * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com> | 
|  | 5 | * | 
|  | 6 | * This program is free software; you can redistribute it and/or | 
|  | 7 | * modify it under the terms of the GNU General Public License as | 
|  | 8 | * published by the Free Software Foundation; either version 2 of the | 
|  | 9 | * License, or (at your option) any later version. | 
|  | 10 | * | 
|  | 11 | * This program is distributed in the hope that it will be useful, but | 
|  | 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
|  | 14 | * General Public License for more details. | 
|  | 15 | * | 
|  | 16 | * You should have received a copy of the GNU General Public License | 
|  | 17 | * along with this program; if not, write to the Free Software | 
|  | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | 
|  | 19 | * USA | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 20 | * | 
|  | 21 | * Special thanks to: | 
|  | 22 | *   Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore | 
|  | 23 | *    bringing to life support for transmission & learning mode. | 
|  | 24 | * | 
|  | 25 | *   Charlie Andrews <charliethepilot@googlemail.com> for lots of help in | 
|  | 26 | *   bringing up the support of new firmware buffer that is popular | 
|  | 27 | *   on latest notebooks | 
|  | 28 | * | 
|  | 29 | *   ENE for partial device documentation | 
|  | 30 | * | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 31 | */ | 
|  | 32 |  | 
|  | 33 | #include <linux/kernel.h> | 
|  | 34 | #include <linux/module.h> | 
|  | 35 | #include <linux/pnp.h> | 
|  | 36 | #include <linux/io.h> | 
|  | 37 | #include <linux/interrupt.h> | 
|  | 38 | #include <linux/sched.h> | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 39 | #include <linux/slab.h> | 
| Mauro Carvalho Chehab | 6bda964 | 2010-11-17 13:28:38 -0300 | [diff] [blame] | 40 | #include <media/rc-core.h> | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 41 | #include "ene_ir.h" | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 42 |  | 
/* Module parameters — presumably bound via module_param() elsewhere in the
 * file (not visible in this chunk); TODO confirm. */
static int sample_period;	/* sample period; compared against ENE_DEFAULT_SAMPLE_PERIOD */
static bool learning_mode_force;	/* not referenced in this chunk — presumably forces learning mode */
static int debug;	/* debug verbosity; nonzero also enables carrier detect in ene_rx_setup() */
static bool txsim;	/* simulate the TX-done interrupt with a timer (see ene_tx_sample) */
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 47 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 48 | static void ene_set_reg_addr(struct ene_device *dev, u16 reg) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 49 | { | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 50 | outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); | 
|  | 51 | outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 52 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 53 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 54 | /* read a hardware register */ | 
|  | 55 | static u8 ene_read_reg(struct ene_device *dev, u16 reg) | 
|  | 56 | { | 
|  | 57 | u8 retval; | 
|  | 58 | ene_set_reg_addr(dev, reg); | 
|  | 59 | retval = inb(dev->hw_io + ENE_IO); | 
|  | 60 | dbg_regs("reg %04x == %02x", reg, retval); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 61 | return retval; | 
|  | 62 | } | 
|  | 63 |  | 
|  | 64 | /* write a hardware register */ | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 65 | static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 66 | { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 67 | dbg_regs("reg %04x <- %02x", reg, value); | 
|  | 68 | ene_set_reg_addr(dev, reg); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 69 | outb(value, dev->hw_io + ENE_IO); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 70 | } | 
|  | 71 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 72 | /* Set bits in hardware register */ | 
|  | 73 | static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 74 | { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 75 | dbg_regs("reg %04x |= %02x", reg, mask); | 
|  | 76 | ene_set_reg_addr(dev, reg); | 
|  | 77 | outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO); | 
|  | 78 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 79 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 80 | /* Clear bits in hardware register */ | 
|  | 81 | static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask) | 
|  | 82 | { | 
|  | 83 | dbg_regs("reg %04x &= ~%02x ", reg, mask); | 
|  | 84 | ene_set_reg_addr(dev, reg); | 
|  | 85 | outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO); | 
|  | 86 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 87 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 88 | /* A helper to set/clear a bit in register according to boolean variable */ | 
|  | 89 | static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask, | 
|  | 90 | bool set) | 
|  | 91 | { | 
|  | 92 | if (set) | 
|  | 93 | ene_set_reg_mask(dev, reg, mask); | 
|  | 94 | else | 
|  | 95 | ene_clear_reg_mask(dev, reg, mask); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 96 | } | 
|  | 97 |  | 
/* detect hardware features
 *
 * Probes chip version registers, classifies the hardware revision
 * (ENE_HW_B/C/D) and, for revC+, reads the firmware feature flags into
 * dev->hw_*.  Returns 0 on success, -ENODEV for disabled/unsupported chips.
 */
static int ene_hw_detect(struct ene_device *dev)
{
	u8 chip_major, chip_minor;
	u8 hw_revision, old_ver;
	u8 fw_reg2, fw_reg1;

	/* version registers are only readable while ENE_ECSTS_RSRVD is
	   cleared; restore the bit afterwards */
	ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
	chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
	chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
	ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);

	hw_revision = ene_read_reg(dev, ENE_ECHV);
	old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);

	/* 12-bit PLL frequency split across two registers */
	dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
		(ene_read_reg(dev, ENE_PLLFRL) >> 4);

	if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
		dev->rx_period_adjust =
			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;

	/* 0xFF reads back when the CIR block is disabled in firmware */
	if (hw_revision == 0xFF) {
		ene_warn("device seems to be disabled");
		ene_warn("send a mail to lirc-list@lists.sourceforge.net");
		ene_warn("please attach output of acpidump and dmidecode");
		return -ENODEV;
	}

	ene_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x",
		chip_major, chip_minor, old_ver, hw_revision);

	ene_notice("PLL freq = %d", dev->pll_freq);

	if (chip_major == 0x33) {
		ene_warn("chips 0x33xx aren't supported");
		return -ENODEV;
	}

	if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_C;
		ene_notice("KB3926C detected");
	} else if (old_ver == 0x24 && hw_revision == 0xC0) {
		dev->hw_revision = ENE_HW_B;
		ene_notice("KB3926B detected");
	} else {
		dev->hw_revision = ENE_HW_D;
		ene_notice("KB3926D or higher detected");
	}

	/* detect features hardware supports */
	if (dev->hw_revision < ENE_HW_C)
		return 0;

	fw_reg1 = ene_read_reg(dev, ENE_FW1);
	fw_reg2 = ene_read_reg(dev, ENE_FW2);

	ene_notice("Firmware regs: %02x %02x", fw_reg1, fw_reg2);

	dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
	dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
	dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);

	if (dev->hw_learning_and_tx_capable)
		dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);

	ene_notice("Hardware features:");

	if (dev->hw_learning_and_tx_capable) {
		ene_notice("* Supports transmitting & learning mode");
		ene_notice("   This feature is rare and therefore,");
		ene_notice("   you are welcome to test it,");
		ene_notice("   and/or contact the author via:");
		ene_notice("   lirc-list@lists.sourceforge.net");
		ene_notice("   or maximlevitsky@gmail.com");

		/* raw (learning) input is the GPIO opposite to the normal
		   demodulated one — see ene_rx_select_input(dev,
		   !dev->hw_use_gpio_0a) in ene_rx_setup() */
		ene_notice("* Uses GPIO %s for IR raw input",
			dev->hw_use_gpio_0a ? "40" : "0A");

		if (dev->hw_fan_input)
			ene_notice("* Uses unused fan feedback input as source"
			" of demodulated IR data");
	}

	if (!dev->hw_fan_input)
		ene_notice("* Uses GPIO %s for IR demodulated input",
			dev->hw_use_gpio_0a ? "0A" : "40");

	if (dev->hw_extra_buffer)
		ene_notice("* Uses new style input buffer");
	return 0;
}
|  | 190 |  | 
/* Read properties of hw sample buffer
 *
 * Initializes the driver's view of the firmware sample ring buffer.
 * On failure of the sanity checks, falls back to the old-style buffer
 * (hw_extra_buffer = false).
 */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
	u16 tmp;

	/* start reading from the current hardware write position */
	ene_rx_read_hw_pointer(dev);
	dev->r_pointer = dev->w_pointer;

	if (!dev->hw_extra_buffer) {
		/* old firmware: only the two fixed packets */
		dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
		return;
	}

	/* firmware publishes address (lo/hi) and length of extra buffer #1 */
	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
	dev->extra_buf1_address = tmp;

	dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);

	/* ... and of extra buffer #2 */
	tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
	tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
	dev->extra_buf2_address = tmp;

	dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);

	/* + 8 = the main firmware sample area (see ene_rx_get_sample_reg,
	   which maps offsets < 8 onto ENE_FW_SAMPLE_BUFFER) */
	dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;

	ene_notice("Hardware uses 2 extended buffers:");
	ene_notice("  0x%04x - len : %d", dev->extra_buf1_address,
		dev->extra_buf1_len);
	ene_notice("  0x%04x - len : %d", dev->extra_buf2_address,
		dev->extra_buf2_len);

	ene_notice("Total buffer len = %d", dev->buffer_len);

	/* sanity-check the firmware-reported values before trusting them */
	if (dev->buffer_len > 64 || dev->buffer_len < 16)
		goto error;

	if (dev->extra_buf1_address > 0xFBFC ||
		dev->extra_buf1_address < 0xEC00)
		goto error;

	if (dev->extra_buf2_address > 0xFBFC ||
		dev->extra_buf2_address < 0xEC00)
		goto error;

	if (dev->r_pointer > dev->buffer_len)
		goto error;

	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
	return;
error:
	ene_warn("Error validating extra buffers, device probably won't work");
	dev->hw_extra_buffer = false;
	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
|  | 247 |  | 
|  | 248 |  | 
|  | 249 | /* Restore the pointers to extra buffers - to make module reload work*/ | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 250 | static void ene_rx_restore_hw_buffer(struct ene_device *dev) | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 251 | { | 
|  | 252 | if (!dev->hw_extra_buffer) | 
|  | 253 | return; | 
|  | 254 |  | 
|  | 255 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0, | 
|  | 256 | dev->extra_buf1_address & 0xFF); | 
|  | 257 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1, | 
|  | 258 | dev->extra_buf1_address >> 8); | 
|  | 259 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len); | 
|  | 260 |  | 
|  | 261 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3, | 
|  | 262 | dev->extra_buf2_address & 0xFF); | 
|  | 263 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4, | 
|  | 264 | dev->extra_buf2_address >> 8); | 
|  | 265 | ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5, | 
|  | 266 | dev->extra_buf2_len); | 
|  | 267 | ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); | 
|  | 268 | } | 
|  | 269 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 270 | /* Read hardware write pointer */ | 
|  | 271 | static void ene_rx_read_hw_pointer(struct ene_device *dev) | 
|  | 272 | { | 
|  | 273 | if (dev->hw_extra_buffer) | 
|  | 274 | dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER); | 
|  | 275 | else | 
|  | 276 | dev->w_pointer = ene_read_reg(dev, ENE_FW2) | 
|  | 277 | & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE; | 
|  | 278 |  | 
|  | 279 | dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x", | 
|  | 280 | dev->w_pointer, dev->r_pointer); | 
|  | 281 | } | 
|  | 282 |  | 
|  | 283 | /* Gets address of next sample from HW ring buffer */ | 
|  | 284 | static int ene_rx_get_sample_reg(struct ene_device *dev) | 
|  | 285 | { | 
|  | 286 | int r_pointer; | 
|  | 287 |  | 
|  | 288 | if (dev->r_pointer == dev->w_pointer) { | 
|  | 289 | dbg_verbose("RB: hit end, try update w_pointer"); | 
|  | 290 | ene_rx_read_hw_pointer(dev); | 
|  | 291 | } | 
|  | 292 |  | 
|  | 293 | if (dev->r_pointer == dev->w_pointer) { | 
|  | 294 | dbg_verbose("RB: end of data at %d", dev->r_pointer); | 
|  | 295 | return 0; | 
|  | 296 | } | 
|  | 297 |  | 
|  | 298 | dbg_verbose("RB: reading at offset %d", dev->r_pointer); | 
|  | 299 | r_pointer = dev->r_pointer; | 
|  | 300 |  | 
|  | 301 | dev->r_pointer++; | 
|  | 302 | if (dev->r_pointer == dev->buffer_len) | 
|  | 303 | dev->r_pointer = 0; | 
|  | 304 |  | 
|  | 305 | dbg_verbose("RB: next read will be from offset %d", dev->r_pointer); | 
|  | 306 |  | 
|  | 307 | if (r_pointer < 8) { | 
|  | 308 | dbg_verbose("RB: read at main buffer at %d", r_pointer); | 
|  | 309 | return ENE_FW_SAMPLE_BUFFER + r_pointer; | 
|  | 310 | } | 
|  | 311 |  | 
|  | 312 | r_pointer -= 8; | 
|  | 313 |  | 
|  | 314 | if (r_pointer < dev->extra_buf1_len) { | 
|  | 315 | dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer); | 
|  | 316 | return dev->extra_buf1_address + r_pointer; | 
|  | 317 | } | 
|  | 318 |  | 
|  | 319 | r_pointer -= dev->extra_buf1_len; | 
|  | 320 |  | 
|  | 321 | if (r_pointer < dev->extra_buf2_len) { | 
|  | 322 | dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer); | 
|  | 323 | return dev->extra_buf2_address + r_pointer; | 
|  | 324 | } | 
|  | 325 |  | 
|  | 326 | dbg("attempt to read beyong ring bufer end"); | 
|  | 327 | return 0; | 
|  | 328 | } | 
|  | 329 |  | 
/* Sense current received carrier
 *
 * Reads the hardware carrier period/pulse-period registers and, if
 * carrier detection is enabled, forwards a carrier report to rc-core.
 *
 * NOTE(review): not declared static — confirm whether it is referenced
 * outside this file; if not, it should probably be static.
 */
void ene_rx_sense_carrier(struct ene_device *dev)
{
	DEFINE_IR_RAW_EVENT(ev);

	int carrier, duty_cycle;
	int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
	int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);

	/* ENE_CIRCAR_PRD_VALID flags a valid measurement */
	if (!(period & ENE_CIRCAR_PRD_VALID))
		return;

	period &= ~ENE_CIRCAR_PRD_VALID;

	/* avoid division by zero below */
	if (!period)
		return;

	dbg("RX: hardware carrier period = %02x", period);
	dbg("RX: hardware carrier pulse period = %02x", hperiod);

	/* 2000000 / period suggests the register counts 500 ns units —
	   TODO confirm against datasheet */
	carrier = 2000000 / period;
	duty_cycle = (hperiod * 100) / period;
	dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
		carrier, duty_cycle);
	if (dev->carrier_detect_enabled) {
		ev.carrier_report = true;
		ev.carrier = carrier;
		ev.duty_cycle = duty_cycle;
		ir_raw_event_store(dev->rdev, &ev);
	}
}
|  | 361 |  | 
|  | 362 | /* this enables/disables the CIR RX engine */ | 
|  | 363 | static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable) | 
|  | 364 | { | 
|  | 365 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, | 
|  | 366 | ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable); | 
|  | 367 | } | 
|  | 368 |  | 
|  | 369 | /* this selects input for CIR engine. Ether GPIO 0A or GPIO40*/ | 
|  | 370 | static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a) | 
|  | 371 | { | 
|  | 372 | ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a); | 
|  | 373 | } | 
|  | 374 |  | 
|  | 375 | /* | 
|  | 376 | * this enables alternative input via fan tachometer sensor and bypasses | 
|  | 377 | * the hw CIR engine | 
|  | 378 | */ | 
|  | 379 | static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable) | 
|  | 380 | { | 
|  | 381 | if (!dev->hw_fan_input) | 
|  | 382 | return; | 
|  | 383 |  | 
|  | 384 | if (!enable) | 
|  | 385 | ene_write_reg(dev, ENE_FAN_AS_IN1, 0); | 
|  | 386 | else { | 
|  | 387 | ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); | 
|  | 388 | ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); | 
|  | 389 | } | 
|  | 390 | } | 
|  | 391 |  | 
/* setup the receiver for RX
 *
 * Chooses the input path (fan input, normal GPIO, or the opposite GPIO
 * for learning/carrier-detect mode), programs the sample period, and
 * derives the rc-core resolution/timeout limits for the chosen path.
 */
static void ene_rx_setup(struct ene_device *dev)
{
	/* carrier detection also needs the raw (learning) input path */
	bool learning_mode = dev->learning_mode_enabled ||
					dev->carrier_detect_enabled;
	int sample_period_adjust = 0;

	dbg("RX: setup receiver, learning mode = %d", learning_mode);


	/* This selects RLC input and clears CFG2 settings */
	ene_write_reg(dev, ENE_CIRCFG2, 0x00);

	/* set sample period */
	if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
		sample_period_adjust =
			dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;

	ene_write_reg(dev, ENE_CIRRLC_CFG,
			(sample_period + sample_period_adjust) |
						ENE_CIRRLC_CFG_OVERFLOW);
	/* revB doesn't support inputs */
	if (dev->hw_revision < ENE_HW_C)
		goto select_timeout;

	if (learning_mode) {

		WARN_ON(!dev->hw_learning_and_tx_capable);

		/* Enable the opposite of the normal input
		That means that if GPIO40 is normally used, use GPIO0A
		and vice versa.
		This input will carry non demodulated
		signal, and we will tell the hw to demodulate it itself */
		ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
		dev->rx_fan_input_inuse = false;

		/* Enable carrier demodulation */
		ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);

		/* Enable carrier detection (debug builds always detect) */
		ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
		ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
			dev->carrier_detect_enabled || debug);
	} else {
		if (dev->hw_fan_input)
			dev->rx_fan_input_inuse = true;
		else
			ene_rx_select_input(dev, dev->hw_use_gpio_0a);

		/* Disable carrier detection & demodulation */
		ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
		ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
	}

select_timeout:
	if (dev->rx_fan_input_inuse) {
		dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);

		/* Fan input doesn't support timeouts, it just ends the
			input with a maximum sample */
		dev->rdev->min_timeout = dev->rdev->max_timeout =
			US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
				ENE_FW_SAMPLE_PERIOD_FAN);
	} else {
		dev->rdev->rx_resolution = US_TO_NS(sample_period);

		/* Theoretically timeout is unlimited, but we cap it
		 * because it was seen that on one device, it
		 * would stop sending spaces after around 250 msec.
		 * Besides, this is close to 2^32 anyway and timeout is u32.
		 */
		dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
		dev->rdev->max_timeout = US_TO_NS(200000);
	}

	if (dev->hw_learning_and_tx_capable)
		dev->rdev->tx_resolution = US_TO_NS(sample_period);

	/* clamp the current timeout into the window computed above */
	if (dev->rdev->timeout > dev->rdev->max_timeout)
		dev->rdev->timeout = dev->rdev->max_timeout;
	if (dev->rdev->timeout < dev->rdev->min_timeout)
		dev->rdev->timeout = dev->rdev->min_timeout;
}
|  | 476 |  | 
/* Enable the device for receive
 *
 * Programs the system interrupt, enables the selected input path,
 * acks stale interrupts and sets the firmware enable bits.
 */
static void ene_rx_enable(struct ene_device *dev)
{
	u8 reg_value;

	/* Enable system interrupt */
	if (dev->hw_revision < ENE_HW_C) {
		/* revB uses dedicated IRQ registers */
		ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
		ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
	} else {
		/* revC+: ENE_IRQ mixes the IRQ number with enable/status
		   bits, so preserve the upper nibble, clear the status
		   bit and merge in the IRQ number */
		reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
		reg_value |= ENE_IRQ_UNK_EN;
		reg_value &= ~ENE_IRQ_STATUS;
		reg_value |= (dev->irq & ENE_IRQ_MASK);
		ene_write_reg(dev, ENE_IRQ, reg_value);
	}

	/* Enable inputs (fan input and CIR engine are mutually exclusive) */
	ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
	ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);

	/* ack any pending irqs - just in case */
	ene_irq_status(dev);

	/* enable firmware bits */
	ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);

	/* enter idle mode */
	ir_raw_event_set_idle(dev->rdev, true);
	dev->rx_enabled = true;
}
|  | 508 |  | 
/* Disable the device receiver (inverse of ene_rx_enable) */
static void ene_rx_disable(struct ene_device *dev)
{
	/* disable inputs */
	ene_rx_enable_cir_engine(dev, false);
	ene_rx_enable_fan_input(dev, false);

	/* disable hardware IRQ and firmware flag */
	ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);

	/* park rc-core in idle and remember the receiver state */
	ir_raw_event_set_idle(dev->rdev, true);
	dev->rx_enabled = false;
}
|  | 522 |  | 
/* This resets the receiver. Useful to stop stream of spaces at end of
 * transmission
 */
static void ene_rx_reset(struct ene_device *dev)
{
	/* toggling RX_EN off and back on restarts the RX engine */
	ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
	ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
}
|  | 531 |  | 
|  | 532 | /* Set up the TX carrier frequency and duty cycle */ | 
|  | 533 | static void ene_tx_set_carrier(struct ene_device *dev) | 
|  | 534 | { | 
|  | 535 | u8 tx_puls_width; | 
|  | 536 | unsigned long flags; | 
|  | 537 |  | 
|  | 538 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 539 |  | 
|  | 540 | ene_set_clear_reg_mask(dev, ENE_CIRCFG, | 
|  | 541 | ENE_CIRCFG_TX_CARR, dev->tx_period > 0); | 
|  | 542 |  | 
|  | 543 | if (!dev->tx_period) | 
|  | 544 | goto unlock; | 
|  | 545 |  | 
|  | 546 | BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0); | 
|  | 547 |  | 
|  | 548 | tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle); | 
|  | 549 |  | 
|  | 550 | if (!tx_puls_width) | 
|  | 551 | tx_puls_width = 1; | 
|  | 552 |  | 
|  | 553 | dbg("TX: pulse distance = %d * 500 ns", dev->tx_period); | 
|  | 554 | dbg("TX: pulse width = %d * 500 ns", tx_puls_width); | 
|  | 555 |  | 
|  | 556 | ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL); | 
|  | 557 | ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width); | 
|  | 558 | unlock: | 
|  | 559 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 560 | } | 
|  | 561 |  | 
|  | 562 | /* Enable/disable transmitters */ | 
|  | 563 | static void ene_tx_set_transmitters(struct ene_device *dev) | 
|  | 564 | { | 
|  | 565 | unsigned long flags; | 
|  | 566 |  | 
|  | 567 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 568 | ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41, | 
|  | 569 | !!(dev->transmitter_mask & 0x01)); | 
|  | 570 | ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D, | 
|  | 571 | !!(dev->transmitter_mask & 0x02)); | 
|  | 572 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 573 | } | 
|  | 574 |  | 
/* prepare transmission
 *
 * Saves the current CIRCFG value (restored by ene_tx_disable) and
 * turns on the TX engine and its interrupt.
 */
static void ene_tx_enable(struct ene_device *dev)
{
	u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
	u8 fwreg2 = ene_read_reg(dev, ENE_FW2);

	dev->saved_conf1 = conf1;

	/* Show information about currently connected transmitter jacks */
	if (fwreg2 & ENE_FW2_EMMITER1_CONN)
		dbg("TX: Transmitter #1 is connected");

	if (fwreg2 & ENE_FW2_EMMITER2_CONN)
		dbg("TX: Transmitter #2 is connected");

	if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
		ene_warn("TX: transmitter cable isn't connected!");

	/* disable receive on revc */
	if (dev->hw_revision == ENE_HW_C)
		conf1 &= ~ENE_CIRCFG_RX_EN;

	/* Enable TX engine */
	conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
	ene_write_reg(dev, ENE_CIRCFG, conf1);
}
|  | 601 |  | 
/* end transmission */
static void ene_tx_disable(struct ene_device *dev)
{
	/* restore the CIRCFG value saved by ene_tx_enable() */
	ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
	/* mark the TX buffer as no longer in use */
	dev->tx_buffer = NULL;
}
|  | 608 |  | 
|  | 609 |  | 
/*
 * Push one RLC sample to the TX hardware - must be called with
 * dev->hw_lock held.
 *
 * Each entry of dev->tx_buffer is converted into hardware sample
 * periods; a long entry is split over several calls because one
 * register write carries at most ENE_CIRRLC_OUT_MASK periods
 * (dev->tx_sample holds the not-yet-written remainder).
 * Shutdown is two-phase: when the buffer is exhausted we first set
 * tx_done and return, and only on the following TX interrupt (the
 * hardware has then sent the final sample) do we disable TX and
 * signal dev->tx_complete.
 */
static void ene_tx_sample(struct ene_device *dev)
{
	u8 raw_tx;
	u32 sample;
	bool pulse = dev->tx_sample_pulse;

	if (!dev->tx_buffer) {
		ene_warn("TX: BUG: attempt to transmit NULL buffer");
		return;
	}

	/* Grab next TX sample */
	if (!dev->tx_sample) {

		if (dev->tx_pos == dev->tx_len) {
			if (!dev->tx_done) {
				dbg("TX: no more data to send");
				dev->tx_done = true;
				goto exit;
			} else {
				dbg("TX: last sample sent by hardware");
				ene_tx_disable(dev);
				complete(&dev->tx_complete);
				return;
			}
		}

		sample = dev->tx_buffer[dev->tx_pos++];
		/* buffer entries alternate pulse/space */
		dev->tx_sample_pulse = !dev->tx_sample_pulse;

		dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);

		/* never emit a zero-length sample */
		if (!dev->tx_sample)
			dev->tx_sample = 1;
	}

	/* write at most ENE_CIRRLC_OUT_MASK periods in one go */
	raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
	dev->tx_sample -= raw_tx;

	dbg("TX: sample %8d (%s)", raw_tx * sample_period,
						pulse ? "pulse" : "space");
	if (pulse)
		raw_tx |= ENE_CIRRLC_OUT_PULSE;

	/* the hardware has two sample registers; alternate between them */
	ene_write_reg(dev,
		dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);

	dev->tx_reg = !dev->tx_reg;
exit:
	/* simulate TX done interrupt */
	if (txsim)
		mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
}
|  | 664 |  | 
|  | 665 | /* timer to simulate tx done interrupt */ | 
|  | 666 | static void ene_tx_irqsim(unsigned long data) | 
|  | 667 | { | 
|  | 668 | struct ene_device *dev = (struct ene_device *)data; | 
|  | 669 | unsigned long flags; | 
|  | 670 |  | 
|  | 671 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 672 | ene_tx_sample(dev); | 
|  | 673 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 674 | } | 
|  | 675 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 676 |  | 
/*
 * Read and acknowledge the interrupt status.
 * Returns a bitmask of ENE_IRQ_RX / ENE_IRQ_TX, or 0 if the interrupt
 * was not raised by this device (shared IRQ line).
 */
static int ene_irq_status(struct ene_device *dev)
{
	u8 irq_status;
	u8 fw_flags1, fw_flags2;
	int retval = 0;

	fw_flags2 = ene_read_reg(dev, ENE_FW2);

	/* Pre-revision-C hardware: a single status bit, ack'ed by
	   clearing it; only RX is reported on this path */
	if (dev->hw_revision < ENE_HW_C) {
		irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);

		if (!(irq_status & ENEB_IRQ_STATUS_IR))
			return 0;

		ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
		return ENE_IRQ_RX;
	}

	irq_status = ene_read_reg(dev, ENE_IRQ);
	if (!(irq_status & ENE_IRQ_STATUS))
		return 0;

	/* original driver does that twice - a workaround ? */
	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
	ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);

	/* check RX interrupt (flag lives in firmware register FW2,
	   cleared by writing it back without the flag) */
	if (fw_flags2 & ENE_FW2_RXIRQ) {
		retval |= ENE_IRQ_RX;
		ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
	}

	/* check TX interrupt (flag in FW1, acked the same way) */
	fw_flags1 = ene_read_reg(dev, ENE_FW1);
	if (fw_flags1 & ENE_FW1_TXIRQ) {
		ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
		retval |= ENE_IRQ_TX;
	}

	return retval;
}
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 719 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 720 | /* interrupt handler */ | 
|  | 721 | static irqreturn_t ene_isr(int irq, void *data) | 
|  | 722 | { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 723 | u16 hw_value, reg; | 
|  | 724 | int hw_sample, irq_status; | 
|  | 725 | bool pulse; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 726 | unsigned long flags; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 727 | irqreturn_t retval = IRQ_NONE; | 
|  | 728 | struct ene_device *dev = (struct ene_device *)data; | 
| Maxim Levitsky | 4651918 | 2010-10-16 19:56:28 -0300 | [diff] [blame] | 729 | DEFINE_IR_RAW_EVENT(ev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 730 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 731 | spin_lock_irqsave(&dev->hw_lock, flags); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 732 |  | 
|  | 733 | dbg_verbose("ISR called"); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 734 | ene_rx_read_hw_pointer(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 735 | irq_status = ene_irq_status(dev); | 
|  | 736 |  | 
|  | 737 | if (!irq_status) | 
|  | 738 | goto unlock; | 
|  | 739 |  | 
|  | 740 | retval = IRQ_HANDLED; | 
|  | 741 |  | 
|  | 742 | if (irq_status & ENE_IRQ_TX) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 743 | dbg_verbose("TX interrupt"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 744 | if (!dev->hw_learning_and_tx_capable) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 745 | dbg("TX interrupt on unsupported device!"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 746 | goto unlock; | 
|  | 747 | } | 
|  | 748 | ene_tx_sample(dev); | 
|  | 749 | } | 
|  | 750 |  | 
|  | 751 | if (!(irq_status & ENE_IRQ_RX)) | 
|  | 752 | goto unlock; | 
|  | 753 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 754 | dbg_verbose("RX interrupt"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 755 |  | 
| Maxim Levitsky | e1b1ddb | 2010-10-16 19:56:29 -0300 | [diff] [blame] | 756 | if (dev->hw_learning_and_tx_capable) | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 757 | ene_rx_sense_carrier(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 758 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 759 | /* On hardware that don't support extra buffer we need to trust | 
|  | 760 | the interrupt and not track the read pointer */ | 
|  | 761 | if (!dev->hw_extra_buffer) | 
|  | 762 | dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0; | 
|  | 763 |  | 
|  | 764 | while (1) { | 
|  | 765 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 766 | reg = ene_rx_get_sample_reg(dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 767 |  | 
|  | 768 | dbg_verbose("next sample to read at: %04x", reg); | 
|  | 769 | if (!reg) | 
|  | 770 | break; | 
|  | 771 |  | 
|  | 772 | hw_value = ene_read_reg(dev, reg); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 773 |  | 
|  | 774 | if (dev->rx_fan_input_inuse) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 775 |  | 
|  | 776 | int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER; | 
|  | 777 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 778 | /* read high part of the sample */ | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 779 | hw_value |= ene_read_reg(dev, reg + offset) << 8; | 
|  | 780 | pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 781 |  | 
|  | 782 | /* clear space bit, and other unused bits */ | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 783 | hw_value &= ENE_FW_SMPL_BUF_FAN_MSK; | 
|  | 784 | hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 785 |  | 
|  | 786 | } else { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 787 | pulse = !(hw_value & ENE_FW_SAMPLE_SPACE); | 
|  | 788 | hw_value &= ~ENE_FW_SAMPLE_SPACE; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 789 | hw_sample = hw_value * sample_period; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 790 |  | 
|  | 791 | if (dev->rx_period_adjust) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 792 | hw_sample *= 100; | 
|  | 793 | hw_sample /= (100 + dev->rx_period_adjust); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 794 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 795 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 796 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 797 | if (!dev->hw_extra_buffer && !hw_sample) { | 
|  | 798 | dev->r_pointer = dev->w_pointer; | 
|  | 799 | continue; | 
|  | 800 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 801 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 802 | dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space"); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 803 |  | 
| Jarod Wilson | 5aad724 | 2011-01-06 16:59:36 -0300 | [diff] [blame] | 804 | ev.duration = US_TO_NS(hw_sample); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 805 | ev.pulse = pulse; | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 806 | ir_raw_event_store_with_filter(dev->rdev, &ev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 807 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 808 |  | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 809 | ir_raw_event_handle(dev->rdev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 810 | unlock: | 
|  | 811 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 812 | return retval; | 
|  | 813 | } | 
|  | 814 |  | 
|  | 815 | /* Initialize default settings */ | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 816 | static void ene_setup_default_settings(struct ene_device *dev) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 817 | { | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 818 | dev->tx_period = 32; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 819 | dev->tx_duty_cycle = 50; /*%*/ | 
|  | 820 | dev->transmitter_mask = 0x03; | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 821 | dev->learning_mode_enabled = learning_mode_force; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 822 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 823 | /* Set reasonable default timeout */ | 
| Jarod Wilson | 5aad724 | 2011-01-06 16:59:36 -0300 | [diff] [blame] | 824 | dev->rdev->timeout = US_TO_NS(150000); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 825 | } | 
|  | 826 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 827 | /* Upload all hardware settings at once. Used at load and resume time */ | 
|  | 828 | static void ene_setup_hw_settings(struct ene_device *dev) | 
|  | 829 | { | 
|  | 830 | if (dev->hw_learning_and_tx_capable) { | 
|  | 831 | ene_tx_set_carrier(dev); | 
|  | 832 | ene_tx_set_transmitters(dev); | 
|  | 833 | } | 
|  | 834 |  | 
|  | 835 | ene_rx_setup(dev); | 
|  | 836 | } | 
|  | 837 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 838 | /* outside interface: called on first open*/ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 839 | static int ene_open(struct rc_dev *rdev) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 840 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 841 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 842 | unsigned long flags; | 
|  | 843 |  | 
|  | 844 | spin_lock_irqsave(&dev->hw_lock, flags); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 845 | ene_rx_enable(dev); | 
|  | 846 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 847 | return 0; | 
|  | 848 | } | 
|  | 849 |  | 
|  | 850 | /* outside interface: called on device close*/ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 851 | static void ene_close(struct rc_dev *rdev) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 852 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 853 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 854 | unsigned long flags; | 
|  | 855 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 856 |  | 
|  | 857 | ene_rx_disable(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 858 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 859 | } | 
|  | 860 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 861 | /* outside interface: set transmitter mask */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 862 | static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 863 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 864 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 865 | dbg("TX: attempt to set transmitter mask %02x", tx_mask); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 866 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 867 | /* invalid txmask */ | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 868 | if (!tx_mask || tx_mask & ~0x03) { | 
|  | 869 | dbg("TX: invalid mask"); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 870 | /* return count of transmitters */ | 
|  | 871 | return 2; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 872 | } | 
|  | 873 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 874 | dev->transmitter_mask = tx_mask; | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 875 | ene_tx_set_transmitters(dev); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 876 | return 0; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 877 | } | 
|  | 878 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 879 | /* outside interface : set tx carrier */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 880 | static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 881 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 882 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 883 | u32 period = 2000000 / carrier; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 884 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 885 | dbg("TX: attempt to set tx carrier to %d kHz", carrier); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 886 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 887 | if (period && (period > ENE_CIRMOD_PRD_MAX || | 
|  | 888 | period < ENE_CIRMOD_PRD_MIN)) { | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 889 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 890 | dbg("TX: out of range %d-%d kHz carrier", | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 891 | 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 892 | return -1; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 893 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 894 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 895 | dev->tx_period = period; | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 896 | ene_tx_set_carrier(dev); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 897 | return 0; | 
|  | 898 | } | 
|  | 899 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 900 | /*outside interface : set tx duty cycle */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 901 | static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle) | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 902 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 903 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 904 | dbg("TX: setting duty cycle to %d%%", duty_cycle); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 905 | dev->tx_duty_cycle = duty_cycle; | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 906 | ene_tx_set_carrier(dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 907 | return 0; | 
|  | 908 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 909 |  | 
|  | 910 | /* outside interface: enable learning mode */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 911 | static int ene_set_learning_mode(struct rc_dev *rdev, int enable) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 912 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 913 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 914 | unsigned long flags; | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 915 | if (enable == dev->learning_mode_enabled) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 916 | return 0; | 
|  | 917 |  | 
|  | 918 | spin_lock_irqsave(&dev->hw_lock, flags); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 919 | dev->learning_mode_enabled = enable; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 920 | ene_rx_disable(dev); | 
|  | 921 | ene_rx_setup(dev); | 
|  | 922 | ene_rx_enable(dev); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 923 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 924 | return 0; | 
|  | 925 | } | 
|  | 926 |  | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 927 | static int ene_set_carrier_report(struct rc_dev *rdev, int enable) | 
| Maxim Levitsky | e1b1ddb | 2010-10-16 19:56:29 -0300 | [diff] [blame] | 928 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 929 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | e1b1ddb | 2010-10-16 19:56:29 -0300 | [diff] [blame] | 930 | unsigned long flags; | 
|  | 931 |  | 
|  | 932 | if (enable == dev->carrier_detect_enabled) | 
|  | 933 | return 0; | 
|  | 934 |  | 
|  | 935 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 936 | dev->carrier_detect_enabled = enable; | 
|  | 937 | ene_rx_disable(dev); | 
|  | 938 | ene_rx_setup(dev); | 
|  | 939 | ene_rx_enable(dev); | 
|  | 940 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 941 | return 0; | 
|  | 942 | } | 
|  | 943 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 944 | /* outside interface: enable or disable idle mode */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 945 | static void ene_set_idle(struct rc_dev *rdev, bool idle) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 946 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 947 | struct ene_device *dev = rdev->priv; | 
|  | 948 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 949 | if (idle) { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 950 | ene_rx_reset(dev); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 951 | dbg("RX: end of data"); | 
|  | 952 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 953 | } | 
|  | 954 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 955 | /* outside interface: transmit */ | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 956 | static int ene_transmit(struct rc_dev *rdev, int *buf, u32 n) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 957 | { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 958 | struct ene_device *dev = rdev->priv; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 959 | unsigned long flags; | 
|  | 960 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 961 | dev->tx_buffer = buf; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 962 | dev->tx_len = n / sizeof(int); | 
|  | 963 | dev->tx_pos = 0; | 
|  | 964 | dev->tx_reg = 0; | 
|  | 965 | dev->tx_done = 0; | 
|  | 966 | dev->tx_sample = 0; | 
|  | 967 | dev->tx_sample_pulse = 0; | 
|  | 968 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 969 | dbg("TX: %d samples", dev->tx_len); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 970 |  | 
|  | 971 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 972 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 973 | ene_tx_enable(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 974 |  | 
|  | 975 | /* Transmit first two samples */ | 
|  | 976 | ene_tx_sample(dev); | 
|  | 977 | ene_tx_sample(dev); | 
|  | 978 |  | 
|  | 979 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 980 |  | 
|  | 981 | if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 982 | dbg("TX: timeout"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 983 | spin_lock_irqsave(&dev->hw_lock, flags); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 984 | ene_tx_disable(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 985 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 986 | } else | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 987 | dbg("TX: done"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 988 | return n; | 
|  | 989 | } | 
|  | 990 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 991 | /* probe entry */ | 
|  | 992 | static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 993 | { | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 994 | int error = -ENOMEM; | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 995 | struct rc_dev *rdev; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 996 | struct ene_device *dev; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 997 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 998 | /* allocate memory */ | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 999 | dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL); | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1000 | rdev = rc_allocate_device(); | 
|  | 1001 | if (!dev || !rdev) | 
| Jiri Slaby | 2e75bce | 2010-10-01 18:13:40 -0300 | [diff] [blame] | 1002 | goto error1; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1003 |  | 
|  | 1004 | /* validate resources */ | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1005 | error = -ENODEV; | 
|  | 1006 |  | 
| Kyle McMartin | 2e4c556 | 2011-01-06 16:59:33 -0300 | [diff] [blame] | 1007 | /* init these to -1, as 0 is valid for both */ | 
|  | 1008 | dev->hw_io = -1; | 
|  | 1009 | dev->irq = -1; | 
|  | 1010 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1011 | if (!pnp_port_valid(pnp_dev, 0) || | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1012 | pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1013 | goto error; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1014 |  | 
|  | 1015 | if (!pnp_irq_valid(pnp_dev, 0)) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1016 | goto error; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1017 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1018 | spin_lock_init(&dev->hw_lock); | 
|  | 1019 |  | 
|  | 1020 | /* claim the resources */ | 
|  | 1021 | error = -EBUSY; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1022 | dev->hw_io = pnp_port_start(pnp_dev, 0); | 
|  | 1023 | if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { | 
|  | 1024 | dev->hw_io = -1; | 
|  | 1025 | dev->irq = -1; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1026 | goto error; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1027 | } | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1028 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1029 | dev->irq = pnp_irq(pnp_dev, 0); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1030 | if (request_irq(dev->irq, ene_isr, | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1031 | IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { | 
|  | 1032 | dev->irq = -1; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1033 | goto error; | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1034 | } | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1035 |  | 
|  | 1036 | pnp_set_drvdata(pnp_dev, dev); | 
|  | 1037 | dev->pnp_dev = pnp_dev; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1038 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1039 | /* don't allow too short/long sample periods */ | 
|  | 1040 | if (sample_period < 5 || sample_period > 0x7F) | 
|  | 1041 | sample_period = ENE_DEFAULT_SAMPLE_PERIOD; | 
|  | 1042 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1043 | /* detect hardware version and features */ | 
|  | 1044 | error = ene_hw_detect(dev); | 
|  | 1045 | if (error) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1046 | goto error; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1047 |  | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1048 | if (!dev->hw_learning_and_tx_capable && txsim) { | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1049 | dev->hw_learning_and_tx_capable = true; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1050 | setup_timer(&dev->tx_sim_timer, ene_tx_irqsim, | 
|  | 1051 | (long unsigned int)dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1052 | ene_warn("Simulation of TX activated"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1053 | } | 
|  | 1054 |  | 
| Maxim Levitsky | a06423c | 2010-10-15 13:06:37 -0300 | [diff] [blame] | 1055 | if (!dev->hw_learning_and_tx_capable) | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 1056 | learning_mode_force = false; | 
| Maxim Levitsky | a06423c | 2010-10-15 13:06:37 -0300 | [diff] [blame] | 1057 |  | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1058 | rdev->driver_type = RC_DRIVER_IR_RAW; | 
| Mauro Carvalho Chehab | 52b6614 | 2010-11-17 14:20:52 -0300 | [diff] [blame] | 1059 | rdev->allowed_protos = RC_TYPE_ALL; | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1060 | rdev->priv = dev; | 
|  | 1061 | rdev->open = ene_open; | 
|  | 1062 | rdev->close = ene_close; | 
|  | 1063 | rdev->s_idle = ene_set_idle; | 
|  | 1064 | rdev->driver_name = ENE_DRIVER_NAME; | 
|  | 1065 | rdev->map_name = RC_MAP_RC6_MCE; | 
|  | 1066 | rdev->input_name = "ENE eHome Infrared Remote Receiver"; | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1067 |  | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1068 | if (dev->hw_learning_and_tx_capable) { | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1069 | rdev->s_learning_mode = ene_set_learning_mode; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1070 | init_completion(&dev->tx_complete); | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1071 | rdev->tx_ir = ene_transmit; | 
|  | 1072 | rdev->s_tx_mask = ene_set_tx_mask; | 
|  | 1073 | rdev->s_tx_carrier = ene_set_tx_carrier; | 
|  | 1074 | rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle; | 
|  | 1075 | rdev->s_carrier_report = ene_set_carrier_report; | 
|  | 1076 | rdev->input_name = "ENE eHome Infrared Remote Transceiver"; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1077 | } | 
|  | 1078 |  | 
| Kyle McMartin | 2e4c556 | 2011-01-06 16:59:33 -0300 | [diff] [blame] | 1079 | dev->rdev = rdev; | 
|  | 1080 |  | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 1081 | ene_rx_setup_hw_buffer(dev); | 
|  | 1082 | ene_setup_default_settings(dev); | 
|  | 1083 | ene_setup_hw_settings(dev); | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1084 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1085 | device_set_wakeup_capable(&pnp_dev->dev, true); | 
|  | 1086 | device_set_wakeup_enable(&pnp_dev->dev, true); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1087 |  | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1088 | error = rc_register_device(rdev); | 
|  | 1089 | if (error < 0) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1090 | goto error; | 
|  | 1091 |  | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1092 | ene_notice("driver has been successfully loaded"); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1093 | return 0; | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1094 | error: | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1095 | if (dev && dev->irq >= 0) | 
| Maxim Levitsky | 931e39a | 2010-07-31 11:59:26 -0300 | [diff] [blame] | 1096 | free_irq(dev->irq, dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1097 | if (dev && dev->hw_io >= 0) | 
|  | 1098 | release_region(dev->hw_io, ENE_IO_SIZE); | 
| Jiri Slaby | 2e75bce | 2010-10-01 18:13:40 -0300 | [diff] [blame] | 1099 | error1: | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1100 | rc_free_device(rdev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1101 | kfree(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1102 | return error; | 
|  | 1103 | } | 
|  | 1104 |  | 
|  | 1105 | /* main unload function */ | 
|  | 1106 | static void ene_remove(struct pnp_dev *pnp_dev) | 
|  | 1107 | { | 
|  | 1108 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | 
|  | 1109 | unsigned long flags; | 
|  | 1110 |  | 
|  | 1111 | spin_lock_irqsave(&dev->hw_lock, flags); | 
|  | 1112 | ene_rx_disable(dev); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 1113 | ene_rx_restore_hw_buffer(dev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1114 | spin_unlock_irqrestore(&dev->hw_lock, flags); | 
|  | 1115 |  | 
|  | 1116 | free_irq(dev->irq, dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1117 | release_region(dev->hw_io, ENE_IO_SIZE); | 
| David Härdeman | d8b4b58 | 2010-10-29 16:08:23 -0300 | [diff] [blame] | 1118 | rc_unregister_device(dev->rdev); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1119 | kfree(dev); | 
|  | 1120 | } | 
|  | 1121 |  | 
|  | 1122 | /* enable wake on IR (wakes on specific button on original remote) */ | 
|  | 1123 | static void ene_enable_wake(struct ene_device *dev, int enable) | 
|  | 1124 | { | 
|  | 1125 | enable = enable && device_may_wakeup(&dev->pnp_dev->dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1126 | dbg("wake on IR %s", enable ? "enabled" : "disabled"); | 
|  | 1127 | ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1128 | } | 
|  | 1129 |  | 
|  | 1130 | #ifdef CONFIG_PM | 
|  | 1131 | static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state) | 
|  | 1132 | { | 
|  | 1133 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1134 | ene_enable_wake(dev, true); | 
|  | 1135 |  | 
|  | 1136 | /* TODO: add support for wake pattern */ | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1137 | return 0; | 
|  | 1138 | } | 
|  | 1139 |  | 
|  | 1140 | static int ene_resume(struct pnp_dev *pnp_dev) | 
|  | 1141 | { | 
|  | 1142 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 1143 | ene_setup_hw_settings(dev); | 
|  | 1144 |  | 
|  | 1145 | if (dev->rx_enabled) | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1146 | ene_rx_enable(dev); | 
| Maxim Levitsky | c29bc4d | 2010-10-16 19:56:30 -0300 | [diff] [blame] | 1147 |  | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1148 | ene_enable_wake(dev, false); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1149 | return 0; | 
|  | 1150 | } | 
|  | 1151 | #endif | 
|  | 1152 |  | 
|  | 1153 | static void ene_shutdown(struct pnp_dev *pnp_dev) | 
|  | 1154 | { | 
|  | 1155 | struct ene_device *dev = pnp_get_drvdata(pnp_dev); | 
| Maxim Levitsky | 11b64d3 | 2010-09-06 18:26:11 -0300 | [diff] [blame] | 1156 | ene_enable_wake(dev, true); | 
| Maxim Levitsky | 9ea53b7 | 2010-07-31 11:59:25 -0300 | [diff] [blame] | 1157 | } | 
|  | 1158 |  | 
/* PnP IDs of the KB3926-family CIR function this driver binds to */
static const struct pnp_device_id ene_ids[] = {
	{.id = "ENE0100",},
	{.id = "ENE0200",},
	{.id = "ENE0201",},
	{.id = "ENE0202",},
	{},	/* terminating entry */
};
|  | 1166 |  | 
/*
 * PnP driver glue.  PNP_DRIVER_RES_DO_NOT_CHANGE tells the PnP core not
 * to reassign the resources the firmware already configured for the chip.
 */
static struct pnp_driver ene_driver = {
	.name = ENE_DRIVER_NAME,
	.id_table = ene_ids,
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,

	.probe = ene_probe,
	.remove = __devexit_p(ene_remove),
#ifdef CONFIG_PM
	.suspend = ene_suspend,
	.resume = ene_resume,
#endif
	.shutdown = ene_shutdown,
};
|  | 1180 |  | 
/* module load: register with the PnP core; probing happens per device */
static int __init ene_init(void)
{
	return pnp_register_driver(&ene_driver);
}
|  | 1185 |  | 
/* module unload: ene_remove() runs for each bound device */
static void ene_exit(void)
{
	pnp_unregister_driver(&ene_driver);
}
|  | 1190 |  | 
/* module parameters (all readable via sysfs; debug is also writable) */
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");

module_param(learning_mode_force, bool, S_IRUGO);
MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");

module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
	"Simulate TX features on unsupported hardware (dangerous)");

/* module metadata */
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
	("Infrared input driver for KB3926B/C/D/E/F "
	"(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");

MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");

module_init(ene_init);
module_exit(ene_exit);