/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define DEBUG

#include <linux/fs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_cmux.h>

#include "modem_notifier.h"

#define MAX_WRITE_RETRY 5
#define MAGIC_NO_V1 0x33FC

static int msm_sdio_cmux_debug_mask;
module_param_named(debug_mask, msm_sdio_cmux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

enum cmd_type {
	DATA = 0,
	OPEN,
	CLOSE,
	STATUS,
	NUM_CMDS
};

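/*
 * Bit positions of the modem-control signals carried in the status byte
 * of STATUS frames. Each logical channel caches them in local_status and
 * remote_status; sdio_cmux_tiocmget()/sdio_cmux_tiocmset() translate
 * between these bits and the TIOCM_* flags.
 */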
#define DSR_POS 0x1
#define CTS_POS 0x2
#define RI_POS 0x4
#define CD_POS 0x8

struct sdio_cmux_ch {
	int lc_id;

	struct mutex lc_lock;
	wait_queue_head_t open_wait_queue;
	int is_remote_open;
	int is_local_open;
	int is_channel_reset;

	char local_status;
	char remote_status;

	struct mutex tx_lock;
	struct list_head tx_list;

	void *priv;
	void (*receive_cb)(void *, int, void *);
	void (*write_done)(void *, int, void *);
	void (*status_callback)(int, void *);
} logical_ch[SDIO_CMUX_NUM_CHANNELS];

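/*
 * Framing used on the SDIO_QMI pipe: every mux packet starts with this
 * header. 'cmd' is one of enum cmd_type, 'lc_id' selects the logical
 * channel and 'pkt_len' is the length of the payload that follows the
 * header (zero for control frames).
 */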
struct sdio_cmux_hdr {
	uint16_t magic_no;
	uint8_t status;		/* This field is reserved for commands other
				 * than STATUS */
	uint8_t cmd;
	uint8_t pad_bytes;
	uint8_t lc_id;
	uint16_t pkt_len;
};

struct sdio_cmux_pkt {
	struct sdio_cmux_hdr *hdr;
	void *data;
};

struct sdio_cmux_list_elem {
	struct list_head list;
	struct sdio_cmux_pkt cmux_pkt;
};

#define logical_ch_is_local_open(x) \
	(logical_ch[(x)].is_local_open)

#define logical_ch_is_remote_open(x) \
	(logical_ch[(x)].is_remote_open)

static void sdio_cdemux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cdemux_work, sdio_cdemux_fn);
static struct workqueue_struct *sdio_cdemux_wq;

static DEFINE_MUTEX(write_lock);
static uint32_t bytes_to_write;
static DEFINE_MUTEX(temp_rx_lock);
static LIST_HEAD(temp_rx_list);

static void sdio_cmux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cmux_work, sdio_cmux_fn);
static struct workqueue_struct *sdio_cmux_wq;

static struct sdio_channel *sdio_qmi_chl;
static uint32_t sdio_cmux_inited;

static uint32_t abort_tx;
static DEFINE_MUTEX(modem_reset_lock);

static DEFINE_MUTEX(probe_lock);

enum {
	MSM_SDIO_CMUX_DEBUG = 1U << 0,
	MSM_SDIO_CMUX_DUMP_BUFFER = 1U << 1,
};

static struct platform_device sdio_ctl_dev = {
	.name = "SDIO_CTL",
	.id = -1,
};

#if defined(DEBUG)
#define D_DUMP_BUFFER(prestr, cnt, buf) \
do { \
	if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DUMP_BUFFER) { \
		int i; \
		pr_debug("%s", prestr); \
		for (i = 0; i < cnt; i++) \
			pr_info("%.2x", buf[i]); \
		pr_debug("\n"); \
	} \
} while (0)

#define D(x...) \
do { \
	if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DEBUG) \
		pr_debug(x); \
} while (0)

#else
#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
#define D(x...) do {} while (0)
#endif

static int sdio_cmux_ch_alloc(int id)
{
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	logical_ch[id].lc_id = id;
	mutex_init(&logical_ch[id].lc_lock);
	init_waitqueue_head(&logical_ch[id].open_wait_queue);
	logical_ch[id].is_remote_open = 0;
	logical_ch[id].is_local_open = 0;
	logical_ch[id].is_channel_reset = 0;

	INIT_LIST_HEAD(&logical_ch[id].tx_list);
	mutex_init(&logical_ch[id].tx_lock);

	logical_ch[id].priv = NULL;
	logical_ch[id].receive_cb = NULL;
	logical_ch[id].write_done = NULL;
	return 0;
}

static int sdio_cmux_ch_clear_and_signal(int id)
{
	struct sdio_cmux_list_elem *list_elem;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	logical_ch[id].is_remote_open = 0;
	mutex_lock(&logical_ch[id].tx_lock);
	while (!list_empty(&logical_ch[id].tx_list)) {
		list_elem = list_first_entry(&logical_ch[id].tx_list,
					     struct sdio_cmux_list_elem,
					     list);
		list_del(&list_elem->list);
		kfree(list_elem->cmux_pkt.hdr);
		kfree(list_elem);
	}
	mutex_unlock(&logical_ch[id].tx_lock);
	if (logical_ch[id].receive_cb)
		logical_ch[id].receive_cb(NULL, 0, logical_ch[id].priv);
	if (logical_ch[id].write_done)
		logical_ch[id].write_done(NULL, 0, logical_ch[id].priv);
	mutex_unlock(&logical_ch[id].lc_lock);
	wake_up(&logical_ch[id].open_wait_queue);
	return 0;
}

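/*
 * Queue a header-only control frame (OPEN/CLOSE/STATUS) on the channel's
 * tx_list and schedule the TX worker. For STATUS frames the current
 * local_status byte is carried in the header.
 */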
static int sdio_cmux_write_cmd(const int id, enum cmd_type type)
{
	int write_size = 0;
	void *write_data = NULL;
	struct sdio_cmux_list_elem *list_elem;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	if (type < 0 || type >= NUM_CMDS) {
		pr_err("%s: Invalid cmd - %d\n", __func__, type);
		return -EINVAL;
	}

	write_size = sizeof(struct sdio_cmux_hdr);
	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}

	write_data = kmalloc(write_size, GFP_KERNEL);
	if (!write_data) {
		pr_err("%s: write_data alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
	list_elem->cmux_pkt.data = NULL;

	list_elem->cmux_pkt.hdr->lc_id = (uint8_t)id;
	list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)0;
	list_elem->cmux_pkt.hdr->cmd = (uint8_t)type;
	list_elem->cmux_pkt.hdr->status = (uint8_t)0;
	if (type == STATUS)
		list_elem->cmux_pkt.hdr->status = logical_ch[id].local_status;
	list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
	list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

	mutex_lock(&logical_ch[id].tx_lock);
	list_add_tail(&list_elem->list, &logical_ch[id].tx_list);
	mutex_unlock(&logical_ch[id].tx_lock);

	mutex_lock(&write_lock);
	bytes_to_write += write_size;
	mutex_unlock(&write_lock);
	queue_work(sdio_cmux_wq, &sdio_cmux_work);

	return 0;
}

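/*
 * Open a logical channel: wait up to one second for the remote OPEN,
 * register the caller's callbacks, deliver any packets that arrived
 * before a receive_cb was available (stashed on temp_rx_list) and then
 * send our own OPEN command.
 */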
int sdio_cmux_open(const int id,
		   void (*receive_cb)(void *, int, void *),
		   void (*write_done)(void *, int, void *),
		   void (*status_callback)(int, void *),
		   void *priv)
{
	int r;
	struct sdio_cmux_list_elem *list_elem, *list_elem_tmp;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid id - %d\n", __func__, id);
		return -EINVAL;
	}

	r = wait_event_timeout(logical_ch[id].open_wait_queue,
			       logical_ch[id].is_remote_open, (1 * HZ));
	if (r < 0) {
		pr_err("ERROR %s: wait_event_timeout() failed for"
		       " ch%d with rc %d\n", __func__, id, r);
		return r;
	}
	if (r == 0) {
		pr_err("ERROR %s: Wait Timed Out for ch%d\n", __func__, id);
		return -ETIMEDOUT;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	if (!logical_ch[id].is_remote_open) {
		pr_err("%s: Remote ch%d not opened\n", __func__, id);
		mutex_unlock(&logical_ch[id].lc_lock);
		return -EINVAL;
	}
	if (logical_ch[id].is_local_open) {
		mutex_unlock(&logical_ch[id].lc_lock);
		return 0;
	}
	logical_ch[id].is_local_open = 1;
	logical_ch[id].priv = priv;
	logical_ch[id].receive_cb = receive_cb;
	logical_ch[id].write_done = write_done;
	logical_ch[id].status_callback = status_callback;
	if (logical_ch[id].receive_cb) {
		mutex_lock(&temp_rx_lock);
		list_for_each_entry_safe(list_elem, list_elem_tmp,
					 &temp_rx_list, list) {
			if ((int)list_elem->cmux_pkt.hdr->lc_id == id) {
				logical_ch[id].receive_cb(
					list_elem->cmux_pkt.data,
					(int)list_elem->cmux_pkt.hdr->pkt_len,
					logical_ch[id].priv);
				list_del(&list_elem->list);
				kfree(list_elem->cmux_pkt.hdr);
				kfree(list_elem);
			}
		}
		mutex_unlock(&temp_rx_lock);
	}
	mutex_unlock(&logical_ch[id].lc_lock);
	sdio_cmux_write_cmd(id, OPEN);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_open);

int sdio_cmux_close(int id)
{
	struct sdio_cmux_ch *ch;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel close\n", __func__);
		return -EINVAL;
	}

	ch = &logical_ch[id];
	mutex_lock(&ch->lc_lock);
	ch->receive_cb = NULL;
	mutex_lock(&ch->tx_lock);
	ch->write_done = NULL;
	mutex_unlock(&ch->tx_lock);
	ch->is_local_open = 0;
	ch->priv = NULL;
	mutex_unlock(&ch->lc_lock);
	sdio_cmux_write_cmd(ch->lc_id, CLOSE);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_close);

int sdio_cmux_write_avail(int id)
{
	int write_avail;

	mutex_lock(&logical_ch[id].lc_lock);
	if (logical_ch[id].is_channel_reset) {
		mutex_unlock(&logical_ch[id].lc_lock);
		return -ENETRESET;
	}
	mutex_unlock(&logical_ch[id].lc_lock);
	write_avail = sdio_write_avail(sdio_qmi_chl);
	return write_avail - bytes_to_write;
}
EXPORT_SYMBOL(sdio_cmux_write_avail);

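/*
 * Queue 'len' bytes for transmission on channel 'id'. The data is copied
 * into a freshly allocated header + payload buffer, appended to the
 * channel's tx_list and the TX worker is scheduled. Returns 'len' on
 * success.
 */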
int sdio_cmux_write(int id, void *data, int len)
{
	struct sdio_cmux_list_elem *list_elem;
	uint32_t write_size;
	void *write_data = NULL;
	struct sdio_cmux_ch *ch;
	int ret;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel id %d\n", __func__, id);
		return -ENODEV;
	}

	ch = &logical_ch[id];
	if (len <= 0) {
		pr_err("%s: Invalid len %d bytes to write\n",
		       __func__, len);
		return -EINVAL;
	}

	write_size = sizeof(struct sdio_cmux_hdr) + len;
	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}

	write_data = kmalloc(write_size, GFP_KERNEL);
	if (!write_data) {
		pr_err("%s: write_data alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
	list_elem->cmux_pkt.data = (void *)((char *)write_data +
					    sizeof(struct sdio_cmux_hdr));
	memcpy(list_elem->cmux_pkt.data, data, len);

	list_elem->cmux_pkt.hdr->lc_id = (uint8_t)ch->lc_id;
	list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)len;
	list_elem->cmux_pkt.hdr->cmd = (uint8_t)DATA;
	list_elem->cmux_pkt.hdr->status = (uint8_t)0;
	list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
	list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

	mutex_lock(&ch->lc_lock);
	if (!ch->is_remote_open || !ch->is_local_open) {
		pr_err("%s: Local ch%d sending data before sending/receiving"
		       " OPEN command\n", __func__, ch->lc_id);
		if (ch->is_channel_reset)
			ret = -ENETRESET;
		else
			ret = -ENODEV;
		mutex_unlock(&ch->lc_lock);
		kfree(write_data);
		kfree(list_elem);
		return ret;
	}
	mutex_lock(&ch->tx_lock);
	list_add_tail(&list_elem->list, &ch->tx_list);
	mutex_unlock(&ch->tx_lock);
	mutex_unlock(&ch->lc_lock);

	mutex_lock(&write_lock);
	bytes_to_write += write_size;
	mutex_unlock(&write_lock);
	queue_work(sdio_cmux_wq, &sdio_cmux_work);

	return len;
}
EXPORT_SYMBOL(sdio_cmux_write);

int is_remote_open(int id)
{
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
		return -ENODEV;

	return logical_ch_is_remote_open(id);
}
EXPORT_SYMBOL(is_remote_open);

int sdio_cmux_is_channel_reset(int id)
{
	int ret;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
		return -ENODEV;

	mutex_lock(&logical_ch[id].lc_lock);
	ret = logical_ch[id].is_channel_reset;
	mutex_unlock(&logical_ch[id].lc_lock);
	return ret;
}
EXPORT_SYMBOL(sdio_cmux_is_channel_reset);

int sdio_cmux_tiocmget(int id)
{
	int ret = (logical_ch[id].remote_status & DSR_POS ? TIOCM_DSR : 0) |
		  (logical_ch[id].remote_status & CTS_POS ? TIOCM_CTS : 0) |
		  (logical_ch[id].remote_status & CD_POS ? TIOCM_CD : 0) |
		  (logical_ch[id].remote_status & RI_POS ? TIOCM_RI : 0) |
		  (logical_ch[id].local_status & CTS_POS ? TIOCM_RTS : 0) |
		  (logical_ch[id].local_status & DSR_POS ? TIOCM_DTR : 0);
	return ret;
}
EXPORT_SYMBOL(sdio_cmux_tiocmget);

int sdio_cmux_tiocmset(int id, unsigned int set, unsigned int clear)
{
	if (set & TIOCM_DTR)
		logical_ch[id].local_status |= DSR_POS;

	if (set & TIOCM_RTS)
		logical_ch[id].local_status |= CTS_POS;

	if (clear & TIOCM_DTR)
		logical_ch[id].local_status &= ~DSR_POS;

	if (clear & TIOCM_RTS)
		logical_ch[id].local_status &= ~CTS_POS;

	sdio_cmux_write_cmd(id, STATUS);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_tiocmset);

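/*
 * Stash a DATA packet (header included) on temp_rx_list when its channel
 * has no receive_cb registered yet; sdio_cmux_open() delivers it once a
 * callback is available.
 */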
static int copy_packet(void *pkt, int size)
{
	struct sdio_cmux_list_elem *list_elem = NULL;
	void *temp_pkt = NULL;

	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}
	temp_pkt = kmalloc(size, GFP_KERNEL);
	if (!temp_pkt) {
		pr_err("%s: temp_pkt alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	memcpy(temp_pkt, pkt, size);
	list_elem->cmux_pkt.hdr = temp_pkt;
	list_elem->cmux_pkt.data = (void *)((char *)temp_pkt +
					    sizeof(struct sdio_cmux_hdr));
	mutex_lock(&temp_rx_lock);
	list_add_tail(&list_elem->list, &temp_rx_list);
	mutex_unlock(&temp_rx_lock);
	return 0;
}

static int process_cmux_pkt(void *pkt, int size)
{
	struct sdio_cmux_hdr *mux_hdr;
	uint32_t id, data_size;
	void *data;
	char *dump_buf = (char *)pkt;

	D_DUMP_BUFFER("process_cmux_pkt:", size, dump_buf);
	mux_hdr = (struct sdio_cmux_hdr *)pkt;
	switch (mux_hdr->cmd) {
	case OPEN:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received OPEN command for ch%d\n", __func__, id);
		mutex_lock(&logical_ch[id].lc_lock);
		logical_ch[id].is_remote_open = 1;
		if (logical_ch[id].is_channel_reset) {
			sdio_cmux_write_cmd(id, OPEN);
			logical_ch[id].is_channel_reset = 0;
		}
		mutex_unlock(&logical_ch[id].lc_lock);
		wake_up(&logical_ch[id].open_wait_queue);
		break;

	case CLOSE:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received CLOSE command for ch%d\n", __func__, id);
		sdio_cmux_ch_clear_and_signal(id);
		break;

	case DATA:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received DATA for ch%d\n", __func__, id);
		/* Drop the packet if the remote side has not opened
		 * this channel yet. */
		mutex_lock(&logical_ch[id].lc_lock);
		if (!logical_ch[id].is_remote_open) {
			mutex_unlock(&logical_ch[id].lc_lock);
			pr_err("%s: Remote ch%d sent data before sending/"
			       "receiving OPEN command\n", __func__, id);
			return -ENODEV;
		}

		data = (void *)((char *)pkt + sizeof(struct sdio_cmux_hdr));
		data_size = (int)(((struct sdio_cmux_hdr *)pkt)->pkt_len);
		if (logical_ch[id].receive_cb)
			logical_ch[id].receive_cb(data, data_size,
						  logical_ch[id].priv);
		else
			copy_packet(pkt, size);
		mutex_unlock(&logical_ch[id].lc_lock);
		break;

	case STATUS:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received STATUS command for ch%d\n", __func__, id);
		if (logical_ch[id].remote_status != mux_hdr->status) {
			mutex_lock(&logical_ch[id].lc_lock);
			logical_ch[id].remote_status = mux_hdr->status;
			mutex_unlock(&logical_ch[id].lc_lock);
			if (logical_ch[id].status_callback)
				logical_ch[id].status_callback(
						sdio_cmux_tiocmget(id),
						logical_ch[id].priv);
		}
		break;
	}
	return 0;
}

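/*
 * A single SDIO read may return several concatenated mux packets. Walk
 * the buffer header by header, hand each packet to process_cmux_pkt()
 * and free the buffer when done.
 */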
static void parse_cmux_data(void *data, int size)
{
	int data_parsed = 0, pkt_size;
	char *temp_ptr;

	D("Entered %s\n", __func__);
	temp_ptr = (char *)data;
	while (data_parsed < size) {
		pkt_size = sizeof(struct sdio_cmux_hdr) +
			   (int)(((struct sdio_cmux_hdr *)temp_ptr)->pkt_len);
		D("Parsed %d bytes, Current Pkt Size %d bytes,"
		  " Total size %d bytes\n", data_parsed, pkt_size, size);
		process_cmux_pkt((void *)temp_ptr, pkt_size);
		data_parsed += pkt_size;
		temp_ptr += pkt_size;
	}

	kfree(data);
}

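/* RX worker: drain everything readable on the SDIO_QMI channel and parse it. */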
static void sdio_cdemux_fn(struct work_struct *work)
{
	int r = 0, read_avail = 0;
	void *cmux_data;

	while (1) {
		read_avail = sdio_read_avail(sdio_qmi_chl);
		if (read_avail < 0) {
			pr_err("%s: sdio_read_avail failed with rc %d\n",
			       __func__, read_avail);
			return;
		}

		if (read_avail == 0) {
			D("%s: Nothing to read\n", __func__);
			return;
		}

		D("%s: kmalloc %d bytes\n", __func__, read_avail);
		cmux_data = kmalloc(read_avail, GFP_KERNEL);
		if (!cmux_data) {
			pr_err("%s: kmalloc Failed\n", __func__);
			return;
		}

		D("%s: sdio_read %d bytes\n", __func__, read_avail);
		r = sdio_read(sdio_qmi_chl, cmux_data, read_avail);
		if (r < 0) {
			pr_err("%s: sdio_read failed with rc %d\n",
			       __func__, r);
			kfree(cmux_data);
			return;
		}

		parse_cmux_data(cmux_data, read_avail);
	}
	return;
}

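/*
 * TX worker: for each logical channel, drain tx_list in order, waiting
 * for enough SDIO write space and retrying failed writes up to
 * MAX_WRITE_RETRY times. A -ENODEV from sdio_write() sets abort_tx so
 * that no further transmission is attempted until the next probe.
 */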
static void sdio_cmux_fn(struct work_struct *work)
{
	int i, r = 0;
	void *write_data;
	uint32_t write_size, write_avail, write_retry = 0;
	int bytes_written;
	struct sdio_cmux_list_elem *list_elem = NULL;
	struct sdio_cmux_ch *ch;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		ch = &logical_ch[i];
		bytes_written = 0;
		mutex_lock(&ch->tx_lock);
		while (!list_empty(&ch->tx_list)) {
			list_elem = list_first_entry(&ch->tx_list,
						     struct sdio_cmux_list_elem,
						     list);
			list_del(&list_elem->list);
			mutex_unlock(&ch->tx_lock);

			write_data = (void *)list_elem->cmux_pkt.hdr;
			write_size = sizeof(struct sdio_cmux_hdr) +
				     (uint32_t)list_elem->cmux_pkt.hdr->pkt_len;

			mutex_lock(&modem_reset_lock);
			while (!(abort_tx) &&
			       ((write_avail = sdio_write_avail(sdio_qmi_chl))
				< write_size)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write_avail %d bytes, "
				       "write size %d bytes. Waiting...\n",
				       __func__, write_avail, write_size);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			while (!(abort_tx) &&
			       ((r = sdio_write(sdio_qmi_chl,
						write_data, write_size)) < 0)
			       && (r != -ENODEV)
			       && (write_retry++ < MAX_WRITE_RETRY)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write failed with rc %d. "
				       "Retrying...\n", __func__, r);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			if (!r && !abort_tx) {
				D("%s: sdio_write completed %d bytes\n",
				  __func__, write_size);
				bytes_written += write_size;
			} else if (r == -ENODEV) {
				pr_err("%s: aborting_tx because sdio_write"
				       " returned %d\n", __func__, r);
				r = 0;
				abort_tx = 1;
			}
			mutex_unlock(&modem_reset_lock);
			kfree(list_elem->cmux_pkt.hdr);
			kfree(list_elem);
			mutex_lock(&write_lock);
			bytes_to_write -= write_size;
			mutex_unlock(&write_lock);
			mutex_lock(&ch->tx_lock);
		}
		if (ch->write_done)
			ch->write_done(NULL, bytes_written, ch->priv);
		mutex_unlock(&ch->tx_lock);
	}
	return;
}

static void sdio_qmi_chl_notify(void *priv, unsigned event)
{
	if (event == SDIO_EVENT_DATA_READ_AVAIL) {
		D("%s: Received SDIO_EVENT_DATA_READ_AVAIL\n", __func__);
		queue_work(sdio_cdemux_wq, &sdio_cdemux_work);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_CMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, logical_ch_is_local_open(j) ? "Y" : "N",
			       logical_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);

	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

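/*
 * Probe runs at boot and again when the SDIO_QMI platform device is
 * re-registered (e.g. after a modem restart). On a re-probe only the
 * SDIO_QMI channel is reopened and abort_tx is cleared; the logical
 * channel state set up by the first probe is kept.
 */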
static int sdio_cmux_probe(struct platform_device *pdev)
{
	int i, r;

	mutex_lock(&probe_lock);
	D("%s Begins\n", __func__);
	if (sdio_cmux_inited) {
		mutex_lock(&modem_reset_lock);
		r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL,
			      sdio_qmi_chl_notify);
		if (r < 0) {
			mutex_unlock(&modem_reset_lock);
			pr_err("%s: sdio_open() failed\n", __func__);
			goto error0;
		}
		abort_tx = 0;
		mutex_unlock(&modem_reset_lock);
		mutex_unlock(&probe_lock);
		return 0;
	}

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i)
		sdio_cmux_ch_alloc(i);
	INIT_LIST_HEAD(&temp_rx_list);

	sdio_cmux_wq = create_singlethread_workqueue("sdio_cmux");
	if (IS_ERR(sdio_cmux_wq)) {
		pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
		       __func__);
		r = -ENOMEM;
		goto error0;
	}

	sdio_cdemux_wq = create_singlethread_workqueue("sdio_cdemux");
	if (IS_ERR(sdio_cdemux_wq)) {
		pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
		       __func__);
		r = -ENOMEM;
		goto error1;
	}

	r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL, sdio_qmi_chl_notify);
	if (r < 0) {
		pr_err("%s: sdio_open() failed\n", __func__);
		goto error2;
	}

	platform_device_register(&sdio_ctl_dev);
	sdio_cmux_inited = 1;
	D("SDIO Control MUX Driver Initialized.\n");
	mutex_unlock(&probe_lock);
	return 0;

error2:
	destroy_workqueue(sdio_cdemux_wq);
error1:
	destroy_workqueue(sdio_cmux_wq);
error0:
	mutex_unlock(&probe_lock);
	return r;
}

static int sdio_cmux_remove(struct platform_device *pdev)
{
	int i;

	mutex_lock(&modem_reset_lock);
	abort_tx = 1;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		mutex_lock(&logical_ch[i].lc_lock);
		logical_ch[i].is_channel_reset = 1;
		mutex_unlock(&logical_ch[i].lc_lock);
		sdio_cmux_ch_clear_and_signal(i);
	}
	sdio_qmi_chl = NULL;
	mutex_unlock(&modem_reset_lock);

	return 0;
}

static struct platform_driver sdio_cmux_driver = {
	.probe = sdio_cmux_probe,
	.remove = sdio_cmux_remove,
	.driver = {
		.name = "SDIO_QMI",
		.owner = THIS_MODULE,
	},
};

static int __init sdio_cmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_cmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif

	msm_sdio_cmux_debug_mask = 0;
	return platform_driver_register(&sdio_cmux_driver);
}

module_init(sdio_cmux_init);
MODULE_DESCRIPTION("MSM SDIO Control MUX");
MODULE_LICENSE("GPL v2");