/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN       0x1
#define SDIO_CH_REMOTE_OPEN      0x2
#define SDIO_CH_IN_RESET         0x4

#define SDIO_MUX_HDR_MAGIC_NO    0x33fc

#define SDIO_MUX_HDR_CMD_DATA    0
#define SDIO_MUX_HDR_CMD_OPEN    1
#define SDIO_MUX_HDR_CMD_CLOSE   2

#define LOW_WATERMARK            2
#define HIGH_WATERMARK           4
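
/*
 * Flow-control note: a channel that has called msm_sdio_dmux_is_ch_full()
 * or msm_sdio_dmux_is_ch_low() has use_wm set, and msm_sdio_dmux_write()
 * then fails with -EAGAIN once the channel's num_tx_pkts reaches
 * HIGH_WATERMARK; the channel reads as "low" again once num_tx_pkts
 * drops back to LOW_WATERMARK or below.
 */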

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do {						\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		sdio_dmux_read_cnt += (x);			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, sdio_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {					\
		sdio_dmux_write_cnt += (x);				\
		if (msm_sdio_dmux_debug_enable)				\
			pr_debug("%s: total written bytes %u\n",	\
				 __func__, sdio_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		sdio_dmux_write_cpy_bytes += (x);			\
		sdio_dmux_write_cpy_cnt++;				\
		if (msm_sdio_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, sdio_dmux_write_cpy_cnt,	\
				 sdio_dmux_write_cpy_bytes);		\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

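/*
 * Per-logical-channel state: 'status' holds the SDIO_CH_* bits defined
 * above, num_tx_pkts counts tx skbs queued but not yet completed for the
 * channel, and use_wm enables the LOW/HIGH_WATERMARK flow control.
 */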
struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
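
/*
 * On-wire framing, as derived from this header and msm_sdio_dmux_write():
 * each unit in the SDIO stream is
 *
 *   | sdio_mux_hdr (8 bytes) | payload (pkt_len bytes) | pad (pad_len) |
 *
 * where pad_len (0-3) rounds the frame up to a 4-byte multiple and
 * magic_num must be SDIO_MUX_HDR_MAGIC_NO. Command frames (OPEN/CLOSE)
 * carry pkt_len == 0.
 */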

struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};
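
/*
 * When an SDIO read ends mid-packet (or mid-header), the unconsumed tail
 * is stashed here by sdio_mux_save_partial_pkt() and prepended to the
 * next read by handle_sdio_partial_pkt().
 */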

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x)						\
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x)			\
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x)			\
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x)				\
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len = len;
	skb->truesize = len + sizeof(struct sk_buff);
}
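
/*
 * Note: skb_set_data() repoints a clone at a sub-range of the shared mux
 * buffer instead of copying; each demuxed packet handed to a client is a
 * clone whose data/tail bracket just that packet's payload.
 */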

static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* cloning could probably be avoided here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
	return;
}

static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;

	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* if allocation fails, attempt to get a smaller chunk of memory */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
		       "%d (NET_SKB_PAD)\n", __func__,
		       sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, iterate one more time to bring the request
		 * back under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_mux_lock);
	return rc;
}

static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for a write-avail
			 * notification from sdio_al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error;
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			dev_kfree_skb_any(skb);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool)))
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop this
			 * skb and reschedule for the
			 * other skbs
			 */
			pr_err("%s: sdio_mux_write error %d"
			       " for ch %d, skb=%p\n",
			       __func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy is probably more
		   efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}

int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}

int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}
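
/*
 * Illustrative client usage (a minimal sketch; my_rx, my_tx_done and
 * my_ctx are hypothetical names, and channel id 0 stands in for any
 * valid SDIO_DMUX channel):
 *
 *	static void my_rx(void *ctx, struct sk_buff *skb)
 *	{
 *		if (skb)			(NULL signals a status change)
 *			dev_kfree_skb_any(skb);	(consume or free the data)
 *	}
 *
 *	static void my_tx_done(void *ctx, struct sk_buff *skb)
 *	{
 *		if (skb)
 *			dev_kfree_skb_any(skb);	(write completed; free skb)
 *	}
 *
 *	rc = msm_sdio_dmux_open(0, my_ctx, my_rx, my_tx_done);
 *	rc = msm_sdio_dmux_write(0, skb);	(-EAGAIN once watermark hit)
 *	...
 *	msm_sdio_dmux_close(0);
 */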

static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif


static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_workqueue = create_singlethread_workqueue("sdio_dmux");
		if (!sdio_mux_workqueue)
			return -ENOMEM;

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);

		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	sdio_mux_initialized = 1;
	return 0;
}

static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* cancel any pending writes */
			spin_lock_irqsave(&sdio_mux_write_lock,
					  write_lock_flags);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
				if (sdio_ch[i].write_done)
					sdio_ch[i].write_done(
							sdio_ch[i].priv, skb);
				else
					dev_kfree_skb_any(skb);
			}
			spin_unlock_irqrestore(&sdio_mux_write_lock,
					       write_lock_flags);

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe = sdio_dmux_probe,
	.remove = sdio_dmux_remove,
	.driver = {
		.name = "SDIO_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");