/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN	0x1
#define SDIO_CH_REMOTE_OPEN	0x2
#define SDIO_CH_IN_RESET	0x4

#define SDIO_MUX_HDR_MAGIC_NO	0x33fc

#define SDIO_MUX_HDR_CMD_DATA	0
#define SDIO_MUX_HDR_CMD_OPEN	1
#define SDIO_MUX_HDR_CMD_CLOSE	2

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do {					\
		if (msm_sdio_dmux_debug_enable)		\
			pr_debug(x);			\
	} while (0)

#define DBG_INC_READ_CNT(x) do {			\
		sdio_dmux_read_cnt += (x);		\
		if (msm_sdio_dmux_debug_enable)		\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, sdio_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {			\
		sdio_dmux_write_cnt += (x);		\
		if (msm_sdio_dmux_debug_enable)		\
			pr_debug("%s: total written bytes %u\n",	\
				 __func__, sdio_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {			\
		sdio_dmux_write_cpy_bytes += (x);	\
		sdio_dmux_write_cpy_cnt++;		\
		if (msm_sdio_dmux_debug_enable)		\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, sdio_dmux_write_cpy_cnt,	\
				 sdio_dmux_write_cpy_bytes);		\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

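/*
 * On-the-wire framing: every muxed packet begins with this header.
 * magic_num must be SDIO_MUX_HDR_MAGIC_NO; cmd selects DATA/OPEN/CLOSE;
 * pkt_len is the payload length excluding the header, and pad_len
 * counts the trailing bytes used to round the payload up to a 4-byte
 * multiple (see msm_sdio_dmux_write()).
 */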
struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_read_workqueue;
static struct workqueue_struct *sdio_mux_write_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x) \
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x) \
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len = len;
	skb->truesize = len + sizeof(struct sk_buff);
}

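/*
 * An SDIO read can end in the middle of a mux packet, or even in the
 * middle of a header.  The tail fragment is stashed in sdio_partial_pkt
 * and prepended to the next read by handle_sdio_partial_pkt().
 */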
static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* I think we can avoid cloning here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
	return;
}

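/*
 * Handle one DATA packet: clone the mux buffer, point the clone at the
 * payload and hand it to the channel's receive_cb (or drop it if no
 * callback is registered).  If the packet runs past the end of the
 * read buffer it is saved as a partial packet instead.  Returns a
 * pointer just past this packet so the caller can parse the next one.
 */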
static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

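/*
 * Dispatch a single mux packet by command type: DATA goes to
 * handle_sdio_mux_data(); OPEN sets the remote-open bit and notifies
 * the client with a NULL skb; CLOSE clears the remote-open bit.
 * Returns a pointer past the consumed packet.
 */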
static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;

	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

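/*
 * Read worker: drain whatever sdio_al has buffered into one large skb,
 * stitch any saved partial packet onto the front, then walk the buffer
 * header by header, dispatching each mux packet.  Requeues itself so
 * the read side keeps running while data is available.
 */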
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
		       "%d (NET_SKB_PAD)\n", __func__,
		       sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
}

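/*
 * Push one framed skb to sdio_al, but only if the whole skb fits in
 * the currently available write space; otherwise return -ENOMEM so the
 * write worker can retry once space frees up.
 */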
static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_mux_lock);
	return rc;
}

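/*
 * Synchronously send a control command (OPEN/CLOSE), polling every
 * 250 ms until sdio_al has room and the write succeeds.  May block
 * indefinitely, so it must not be called from atomic context.
 */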
static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

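/*
 * Write worker: drain sdio_mux_write_pool.  On success the skb is
 * returned through the channel's write_done callback.  On a
 * recoverable error (-EAGAIN/-ENOMEM, or not enough write space) the
 * skb is requeued and the work rescheduled after 250 ms, unless the
 * channel is in reset.  On -ENODEV the link is declared dead
 * (fatal_error) and all pending writes are dropped.
 */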
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error;
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			dev_kfree_skb_any(skb);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool)))
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skbs
			 */
			pr_err("%s: sdio_mux_write error %d"
			       " for ch %d, skb=%p\n",
			       __func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_write_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

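/*
 * msm_sdio_is_channel_in_reset() - query a channel's reset state
 * @id: channel id
 *
 * Returns 1 if SDIO_CH_IN_RESET is set (the underlying SDIO device was
 * removed while the channel was open), 0 otherwise, or -EINVAL for a
 * bad channel id.
 */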
int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

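/*
 * msm_sdio_dmux_write() - queue an skb for transmission on a channel
 * @id:  channel id
 * @skb: packet to send; on success ownership passes to the mux and the
 *       skb comes back through the channel's write_done callback
 *
 * Prepends a mux header (copying into a larger skb if there is no
 * tailroom for the 4-byte padding), queues the skb on the write pool
 * and kicks the write worker.  Returns 0 on success, -EAGAIN if the
 * high watermark has been reached, -ENETRESET if the channel is in
 * reset, or another -errno.
 */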
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}

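/*
 * msm_sdio_dmux_open() - open the local side of a logical channel
 * @id:         channel id
 * @priv:       opaque pointer passed back to both callbacks
 * @receive_cb: called for each received skb; also called with a NULL
 *              skb to signal a channel status change
 * @write_done: called when a queued skb has been written (or dropped)
 *
 * Marks the channel locally open and sends an OPEN command to the
 * remote side.  A minimal client sequence (names are illustrative
 * only) might look like:
 *
 *	static void my_rx(void *priv, struct sk_buff *skb) { ... }
 *	static void my_tx_done(void *priv, struct sk_buff *skb) { ... }
 *
 *	msm_sdio_dmux_open(ch, ctxt, my_rx, my_tx_done);
 *	msm_sdio_dmux_write(ch, skb);
 *	...
 *	msm_sdio_dmux_close(ch);
 */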
int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}

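/*
 * msm_sdio_dmux_close() - close the local side of a logical channel
 * @id: channel id
 *
 * Clears the callbacks, drops the local-open and in-reset bits and
 * sends a CLOSE command to the remote side.  Blocks until the command
 * has been written.
 */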
int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}

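/*
 * sdio_al event callback: kick the read or write worker when data or
 * write space becomes available on the muxed channel.
 */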
static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
}

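/*
 * msm_sdio_dmux_is_ch_full() - flow-control query for the TX path
 * @id: channel id
 *
 * The first call switches the channel into watermark mode.  Returns
 * nonzero once HIGH_WATERMARK packets are queued, 0 below that, or
 * -ENODEV/-EINVAL on error.
 */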
int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

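/*
 * msm_sdio_dmux_is_ch_low() - companion query for resuming TX
 * @id: channel id
 *
 * Returns nonzero once the TX queue has drained to LOW_WATERMARK
 * packets or fewer, signalling that a throttled client may write
 * again.
 */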
int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

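/*
 * Platform probe: on the first probe, create the single-threaded read
 * and write workqueues, the per-channel locks and the wakelock, then
 * open the "SDIO_RMNT" pipe through sdio_al.  If probe runs again
 * (e.g. after the platform device is re-registered), only the
 * sdio_open() is repeated.
 */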
static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_read_workqueue = create_singlethread_workqueue(
						"sdio_dmux_read");
		if (!sdio_mux_read_workqueue)
			return -ENOMEM;

		sdio_mux_write_workqueue = create_singlethread_workqueue(
						"sdio_dmux_write");
		if (!sdio_mux_write_workqueue) {
			destroy_workqueue(sdio_mux_read_workqueue);
			return -ENOMEM;
		}

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);


		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_read_workqueue);
		destroy_workqueue(sdio_mux_write_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	sdio_mux_initialized = 1;
	return 0;
}

static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* cancel any pending writes */
			spin_lock_irqsave(&sdio_mux_write_lock,
					  write_lock_flags);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
				if (sdio_ch[i].write_done)
					sdio_ch[i].write_done(
							sdio_ch[i].priv, skb);
				else
					dev_kfree_skb_any(skb);
			}
			spin_unlock_irqrestore(&sdio_mux_write_lock,
					       write_lock_flags);

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe		= sdio_dmux_probe,
	.remove		= sdio_dmux_remove,
	.driver		= {
		.name	= "SDIO_RMNT",
		.owner	= THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");