/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN	0x1
#define SDIO_CH_REMOTE_OPEN	0x2
#define SDIO_CH_IN_RESET	0x4

#define SDIO_MUX_HDR_MAGIC_NO	0x33fc

#define SDIO_MUX_HDR_CMD_DATA	0
#define SDIO_MUX_HDR_CMD_OPEN	1
#define SDIO_MUX_HDR_CMD_CLOSE	2

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
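
/*
 * Per-channel TX flow control: once a client opts in via
 * msm_sdio_dmux_is_ch_full()/msm_sdio_dmux_is_ch_low() (which set
 * use_wm), msm_sdio_dmux_write() rejects new packets with -EAGAIN
 * while num_tx_pkts >= HIGH_WATERMARK; the channel is considered
 * writable again once it drains to LOW_WATERMARK or below.
 */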

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do {						\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		sdio_dmux_read_cnt += (x);			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, sdio_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		sdio_dmux_write_cnt += (x);			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, sdio_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {				\
		sdio_dmux_write_cpy_bytes += (x);		\
		sdio_dmux_write_cpy_cnt++;			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, sdio_dmux_write_cpy_cnt, \
				 sdio_dmux_write_cpy_bytes);	\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
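
/*
 * On the wire, each mux frame is the 8-byte header above followed by
 * pkt_len payload bytes and pad_len padding bytes; the transmit path
 * sizes the padding so the whole frame is a multiple of 4 bytes:
 *
 *	| magic_num | reserved cmd pad_len ch_id | pkt_len | payload | pad |
 */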

struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_read_mux_lock);
static DEFINE_MUTEX(sdio_write_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x) \
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x) \
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

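/*
 * Re-point an skb's data window at an arbitrary region of its buffer.
 * This is applied to clones of the large mux skb so each demuxed
 * packet can be passed up without a copy.  There is no bounds checking
 * here; callers must ensure data + len stays within the cloned buffer
 * (hence the "panic if tail > end" note below).
 */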
static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len = len;
	skb->truesize = len + sizeof(struct sk_buff);
}

static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* I think we can avoid cloning here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
}

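/*
 * Handle one DATA frame inside the mux skb.  Returns a pointer just
 * past this frame (i.e. the next header position); if the frame runs
 * past the end of the buffer it is saved as a partial packet to be
 * completed by the next read.
 */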
static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;
		sdio_ch[hdr->ch_id].num_tx_pkts = 0;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;

	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

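/*
 * Read worker: drains everything currently available on the SDIO mux
 * channel into one large skb, prepends any saved partial packet, then
 * walks the buffer header by header, dispatching each frame.  It
 * requeues itself after each pass so a full pipe keeps being drained.
 */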
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_read_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_read_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails, attempt to get a smaller chunk of memory */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_read_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_read_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_read_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_write_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_write_mux_lock);
	return rc;
}

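/*
 * Blocking write of a control command.  Unlike the data path, this
 * polls every 250 ms until the SDIO channel has room and the write
 * succeeds, so it must not be called from atomic context.
 */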
static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_write_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_write_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_write_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

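/*
 * Write worker: drains the shared write pool.  A frame that does not
 * fit in the current write window (or fails with -EAGAIN/-ENOMEM) is
 * put back at the head of the pool and retried 250 ms later; -ENODEV
 * marks a fatal sdio_al error and flushes everything pending.
 */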
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for a write avail
			 * notification from sdio_al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error;
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
						skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop this
			 * skb and reschedule for the
			 * remaining skbs
			 */
			pr_err("%s: sdio_mux_write error %d"
				" for ch %d, skb=%p\n",
				__func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy are probably more
		   efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		DBG_INC_WRITE_CPY(skb->len);
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	__skb_queue_tail(&sdio_mux_write_pool, skb);
	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	return rc;
}

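/*
 * Illustrative client usage of msm_sdio_dmux_open() (below) and
 * msm_sdio_dmux_write() (above).  The names my_ctx, my_receive and
 * my_write_done are hypothetical placeholders, not part of this
 * driver:
 *
 *	static void my_receive(void *priv, struct sk_buff *skb)
 *	{
 *		if (!skb)
 *			return;		(a NULL skb signals a status change)
 *		...consume the data, then dev_kfree_skb_any(skb)...
 *	}
 *
 *	static void my_write_done(void *priv, struct sk_buff *skb)
 *	{
 *		if (skb)
 *			dev_kfree_skb_any(skb);
 *	}
 *
 *	rc = msm_sdio_dmux_open(ch_id, my_ctx, my_receive, my_write_done);
 *	if (!rc)
 *		rc = msm_sdio_dmux_write(ch_id, skb);
 */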
int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}

int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}

static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_workqueue = create_singlethread_workqueue("sdio_dmux");
		if (!sdio_mux_workqueue)
			return -ENOMEM;

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);

		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	fatal_error = 0;
	sdio_mux_initialized = 1;
	return 0;
}

static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	/* cancel any pending writes */
	spin_lock_irqsave(&sdio_mux_write_lock, write_lock_flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		i = ((struct sdio_mux_hdr *)skb->data)->ch_id;
		if (sdio_ch[i].write_done)
			sdio_ch[i].write_done(
					sdio_ch[i].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock,
			       write_lock_flags);

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe		= sdio_dmux_probe,
	.remove		= sdio_dmux_remove,
	.driver		= {
		.name	= "SDIO_RMNT",
		.owner	= THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");