/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 *  SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN       0x1
#define SDIO_CH_REMOTE_OPEN      0x2
#define SDIO_CH_IN_RESET         0x4

#define SDIO_MUX_HDR_MAGIC_NO    0x33fc

#define SDIO_MUX_HDR_CMD_DATA    0
#define SDIO_MUX_HDR_CMD_OPEN    1
#define SDIO_MUX_HDR_CMD_CLOSE   2

#define LOW_WATERMARK            2
#define HIGH_WATERMARK           4

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do {                           \
		if (msm_sdio_dmux_debug_enable)  \
			pr_debug(x);             \
	} while (0)

#define DBG_INC_READ_CNT(x) do {                                       \
		sdio_dmux_read_cnt += (x);                             \
		if (msm_sdio_dmux_debug_enable)                        \
			pr_debug("%s: total read bytes %u\n",          \
				 __func__, sdio_dmux_read_cnt);        \
	} while (0)

#define DBG_INC_WRITE_CNT(x)  do {                                     \
		sdio_dmux_write_cnt += (x);                            \
		if (msm_sdio_dmux_debug_enable)                        \
			pr_debug("%s: total written bytes %u\n",       \
				 __func__, sdio_dmux_write_cnt);       \
	} while (0)

#define DBG_INC_WRITE_CPY(x)  do {                                           \
		sdio_dmux_write_cpy_bytes += (x);                            \
		sdio_dmux_write_cpy_cnt++;                                   \
		if (msm_sdio_dmux_debug_enable)                              \
			pr_debug("%s: total write copy cnt %u, bytes %u\n",  \
				 __func__, sdio_dmux_write_cpy_cnt,          \
				 sdio_dmux_write_cpy_bytes);                 \
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

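/* per-channel state: open/reset status, client callbacks and tx accounting */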
struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

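/* on-the-wire header prepended to every muxed frame; pkt_len is the
 * payload size and pad_len the bytes added to round the frame up to
 * a 4-byte boundary
 */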
struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

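/* bookkeeping for a frame that straddles two SDIO reads */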
struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x)						\
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x)			\
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x)			\
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x)			\
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

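/* point an skb at an arbitrary range inside another skb's buffer and
 * fix up tail/len/truesize to match
 */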
static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len  = len;
	skb->truesize = len + sizeof(struct sk_buff);
}

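/* stash the trailing fragment of skb_mux (starting at hdr) so the
 * next read can prepend it and reassemble the full frame
 */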
static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* I think we can avoid cloning here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
	return;
}

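/* deliver one data frame to the owning channel's receive callback;
 * returns a pointer just past the frame, or saves it as a partial
 * packet if it runs off the end of the read buffer
 */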
static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

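/* dispatch one mux frame by command type: DATA goes to the channel,
 * OPEN/CLOSE update remote channel state; returns a pointer just
 * past the consumed frame
 */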
static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;
		sdio_ch[hdr->ch_id].num_tx_pkts = 0;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

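/* if a partial frame was saved by the previous read, prepend it to
 * the new buffer and process the reassembled frame first
 */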
static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;

	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

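/* read worker: drain whatever sdio_al has available into one large
 * skb, stitch in any saved partial frame, then walk the buffer frame
 * by frame; requeues itself so reads continue back to back
 */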
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
	static int workqueue_pinned;

	if (!workqueue_pinned) {
		struct cpumask cpus;

		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);

		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n",
			       __func__);
		workqueue_pinned = 1;
	}

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
		       "%d (NET_SKB_PAD)\n", __func__,
		       sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

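/* push a single framed skb to sdio_al if there is room; returns
 * -ENOMEM when write_avail is too small so the caller can retry
 */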
static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_mux_lock);
	return rc;
}

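/* synchronously write a command frame, polling every 250 ms until
 * sdio_al has room and the write succeeds
 */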
static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

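/* write worker: drain the shared write pool, completing each skb via
 * the channel's write_done callback; transient errors requeue the
 * skb and reschedule after 250 ms, -ENODEV drops all pending writes
 * and marks the mux fatally broken
 */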
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
					 skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skb's
			 */
			pr_err("%s: sdio_mux_write error %d"
			       " for ch %d, skb=%p\n",
			       __func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

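/* queue an skb for tx on channel id: push the mux header (copying
 * into a bigger skb if there is no tailroom for pad bytes), pad the
 * frame to a 4-byte boundary and kick the write worker; fails fast
 * if the channel is in reset, not open, or over its high watermark
 */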
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, dev_alloc_skb and memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}

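/* open channel id locally, register the client's callbacks and send
 * an OPEN command so the remote side marks the channel up
 */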
int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}

int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}

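/* sdio_al event callback: kick the read/write workers when data or
 * buffer space becomes available on the mux channel
 */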
static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

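/* flow-control query: returns nonzero once num_tx_pkts reaches the
 * high watermark; also switches the channel into watermark mode as a
 * side effect
 */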
int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

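/* counterpart to msm_sdio_dmux_is_ch_full; note this variant reads
 * num_tx_pkts without taking the channel lock
 */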
int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d  local open=%s  remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

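/* on first probe set up the workqueue, write pool, channel locks and
 * wakelock, then open the shared SDIO_RMNT channel
 */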
static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_workqueue = create_singlethread_workqueue("sdio_dmux");
		if (!sdio_mux_workqueue)
			return -ENOMEM;

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);

		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	fatal_error = 0;
	sdio_mux_initialized = 1;
	return 0;
}

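/* flag every open channel as in reset, notify clients, then complete
 * or free all writes still sitting in the pool
 */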
static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	/* cancel any pending writes */
	spin_lock_irqsave(&sdio_mux_write_lock, write_lock_flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		i = ((struct sdio_mux_hdr *)skb->data)->ch_id;
		if (sdio_ch[i].write_done)
			sdio_ch[i].write_done(
					sdio_ch[i].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock,
			       write_lock_flags);

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe		= sdio_dmux_probe,
	.remove		= sdio_dmux_remove,
	.driver		= {
		.name	= "SDIO_RMNT",
		.owner	= THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");