/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>

#define BAM_CH_LOCAL_OPEN       0x1
#define BAM_CH_REMOTE_OPEN      0x2
#define BAM_CH_IN_RESET         0x4

#define BAM_MUX_HDR_MAGIC_NO    0x33fc

#define BAM_MUX_HDR_CMD_DATA            0
#define BAM_MUX_HDR_CMD_OPEN            1
#define BAM_MUX_HDR_CMD_CLOSE           2
#define BAM_MUX_HDR_CMD_STATUS          3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC   4

#define POLLING_MIN_SLEEP       950  /* 0.95 ms */
#define POLLING_MAX_SLEEP       1050 /* 1.05 ms */
#define POLLING_INACTIVITY      40   /* cycles before switch to intr mode */

#define LOW_WATERMARK           2
#define HIGH_WATERMARK          4

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
           int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;

#define DBG(x...) do {                      \
        if (msm_bam_dmux_debug_enable)      \
            pr_debug(x);                    \
    } while (0)

#define DBG_INC_READ_CNT(x) do {                          \
        bam_dmux_read_cnt += (x);                         \
        if (msm_bam_dmux_debug_enable)                    \
            pr_debug("%s: total read bytes %u\n",         \
                 __func__, bam_dmux_read_cnt);            \
    } while (0)

#define DBG_INC_WRITE_CNT(x) do {                         \
        bam_dmux_write_cnt += (x);                        \
        if (msm_bam_dmux_debug_enable)                    \
            pr_debug("%s: total written bytes %u\n",      \
                 __func__, bam_dmux_write_cnt);           \
    } while (0)

#define DBG_INC_WRITE_CPY(x) do {                                  \
        bam_dmux_write_cpy_bytes += (x);                           \
        bam_dmux_write_cpy_cnt++;                                  \
        if (msm_bam_dmux_debug_enable)                             \
            pr_debug("%s: total write copy cnt %u, bytes %u\n",    \
                 __func__, bam_dmux_write_cpy_cnt,                 \
                 bam_dmux_write_cpy_bytes);                        \
    } while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
    bam_dmux_tx_sps_failure_cnt++;        \
} while (0)

#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#endif

struct bam_ch_info {
    uint32_t status;
    void (*notify)(void *, int, unsigned long);
    void *priv;
    spinlock_t lock;
    struct platform_device *pdev;
    char name[BAM_DMUX_CH_NAME_MAX_LEN];
    int num_tx_pkts;
    int use_wm;
};

struct tx_pkt_info {
    struct sk_buff *skb;
    dma_addr_t dma_address;
    char is_cmd;
    uint32_t len;
    struct work_struct work;
    struct list_head list_node;
    unsigned ts_sec;
    unsigned long ts_nsec;
};

struct rx_pkt_info {
    struct sk_buff *skb;
    dma_addr_t dma_address;
    struct work_struct work;
    struct list_head list_node;
};

#define A2_NUM_PIPES            6
#define A2_SUMMING_THRESHOLD    4096
#define A2_DEFAULT_DESCRIPTORS  32
#define A2_PHYS_BASE            0x124C2000
#define A2_PHYS_SIZE            0x2000
#define BUFFER_SIZE             2048
#define NUM_BUFFERS             32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);

struct bam_mux_hdr {
    uint16_t magic_num;
    uint8_t reserved;
    uint8_t cmd;
    uint8_t pad_len;
    uint8_t ch_id;
    uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000 /* in ms */
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static struct clk *dfab_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
                unsigned long code,
                void *data);

static struct notifier_block restart_notifier = {
    .notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
    (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
    (bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
    (bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
    (bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

#define DMUX_LOG_KERR(fmt...) \
do { \
    bam_dmux_log(fmt); \
    pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * Complete size of the message is limited to LOG_MESSAGE_MAX_SIZE bytes.
 */
static void bam_dmux_log(const char *fmt, ...)
{
    char buff[LOG_MESSAGE_MAX_SIZE];
    unsigned long flags;
    va_list arg_list;
    unsigned long long t_now;
    unsigned long nanosec_rem;
    int len = 0;

    if (bam_dmux_state_logging_disabled)
        return;

    t_now = sched_clock();
    nanosec_rem = do_div(t_now, 1000000000U);

    /*
     * States
     * D: 1 = Power collapse disabled
     * R: 1 = in global reset
     * P: 1 = BAM is powered up
     * A: 1 = BAM initialized and ready for data
     *
     * V: 1 = Uplink vote for power
     * U: 1 = Uplink active
     * W: 1 = Uplink Wait-for-ack
     * A: 1 = Uplink ACK received
     */
    len += scnprintf(buff, sizeof(buff),
            "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c ",
            (unsigned)t_now, nanosec_rem,
            a2_pc_disabled ? 'D' : 'd',
            in_global_reset ? 'R' : 'r',
            bam_dmux_power_state ? 'P' : 'p',
            bam_connection_is_active ? 'A' : 'a',
            bam_dmux_uplink_vote ? 'V' : 'v',
            bam_is_connected ? 'U' : 'u',
            wait_for_ack ? 'W' : 'w',
            ul_wakeup_ack_completion.done ? 'A' : 'a'
            );

    va_start(arg_list, fmt);
    len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
    va_end(arg_list);
    memset(buff + len, 0x0, sizeof(buff) - len);

    spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
    if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
        char junk[LOG_MESSAGE_MAX_SIZE];
        int ret;

        ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
        if (ret != LOG_MESSAGE_MAX_SIZE) {
            pr_err("%s: unable to empty log %d\n", __func__, ret);
            spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
                    flags);
            return;
        }
    }
    kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
    spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
    unsigned long long t_now;

    t_now = sched_clock();
    pkt->ts_nsec = do_div(t_now, 1000000000U);
    pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
    unsigned long flags;
    struct tx_pkt_info *info;
    int reported = 0;

    spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
    list_for_each_entry(info, &bam_tx_pool, list_node) {
        if (!reported) {
            DMUX_LOG_KERR("%s: tx pool not empty\n", func);
            reported = 1;
        }
        DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
            &info->list_node, info->ts_sec, info->ts_nsec);
    }
    spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

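/*
 * Replenish the RX pool: allocate an rx_pkt_info and an skb, add them to
 * bam_rx_pool, and queue the buffer on the BAM RX pipe so the A2 can DMA
 * a downlink packet into it.
 */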
static void queue_rx(void)
{
    void *ptr;
    struct rx_pkt_info *info;

    if (in_global_reset)
        return;

    info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
    if (!info) {
        pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
        return;
    }

    INIT_WORK(&info->work, handle_bam_mux_cmd);

    info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
    if (info->skb == NULL) {
        pr_err("%s: unable to alloc skb\n", __func__);
        kfree(info);
        return;
    }
    ptr = skb_put(info->skb, BUFFER_SIZE);

    mutex_lock(&bam_rx_pool_mutexlock);
    list_add_tail(&info->list_node, &bam_rx_pool);
    mutex_unlock(&bam_rx_pool_mutexlock);

    /* need a way to handle error case */
    info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
                        DMA_FROM_DEVICE);
    sps_transfer_one(bam_rx_pipe, info->dma_address,
                BUFFER_SIZE, info,
                SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
    unsigned long flags;
    struct bam_mux_hdr *rx_hdr;
    unsigned long event_data;

    rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

    rx_skb->data = (unsigned char *)(rx_hdr + 1);
    rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
    rx_skb->len = rx_hdr->pkt_len;
    rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

    event_data = (unsigned long)(rx_skb);

    spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
    if (bam_ch[rx_hdr->ch_id].notify)
        bam_ch[rx_hdr->ch_id].notify(
            bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
                        event_data);
    else
        dev_kfree_skb_any(rx_skb);
    spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

    queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
    bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
    bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
    spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
    queue_rx();
    ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
    if (ret)
        pr_err("%s: platform_device_add() error: %d\n",
                __func__, ret);
}

static void handle_bam_mux_cmd(struct work_struct *work)
{
    unsigned long flags;
    struct bam_mux_hdr *rx_hdr;
    struct rx_pkt_info *info;
    struct sk_buff *rx_skb;

    info = container_of(work, struct rx_pkt_info, work);
    rx_skb = info->skb;
    dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
    kfree(info);

    rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

    DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
    DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
            rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
            rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
    if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
        DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
            " reserved %d cmd %d"
            " pad %d ch %d len %d\n", __func__,
            rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
            rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
        dev_kfree_skb_any(rx_skb);
        queue_rx();
        return;
    }

    if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
        DMUX_LOG_KERR("%s: dropping invalid LCID %d"
            " reserved %d cmd %d"
            " pad %d ch %d len %d\n", __func__,
            rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
            rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
        dev_kfree_skb_any(rx_skb);
        queue_rx();
        return;
    }

    switch (rx_hdr->cmd) {
    case BAM_MUX_HDR_CMD_DATA:
        DBG_INC_READ_CNT(rx_hdr->pkt_len);
        bam_mux_process_data(rx_skb);
        break;
    case BAM_MUX_HDR_CMD_OPEN:
        bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
                rx_hdr->ch_id);
        handle_bam_mux_cmd_open(rx_hdr);
        dev_kfree_skb_any(rx_skb);
        break;
    case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
        bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
                rx_hdr->ch_id);

        if (!a2_pc_disabled) {
            a2_pc_disabled = 1;
            schedule_delayed_work(&ul_timeout_work,
                msecs_to_jiffies(UL_TIMEOUT_DELAY));
        }

        handle_bam_mux_cmd_open(rx_hdr);
        dev_kfree_skb_any(rx_skb);
        break;
    case BAM_MUX_HDR_CMD_CLOSE:
        /* probably should drop pending write */
        bam_dmux_log("%s: closing cid %d\n", __func__,
                rx_hdr->ch_id);
        spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
        bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
        spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
        queue_rx();
        platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
        bam_ch[rx_hdr->ch_id].pdev =
            platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
        if (!bam_ch[rx_hdr->ch_id].pdev)
            pr_err("%s: platform_device_alloc failed\n", __func__);
        dev_kfree_skb_any(rx_skb);
        break;
    default:
        DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
            " reserved %d cmd %d pad %d ch %d len %d\n",
            __func__, rx_hdr->magic_num, rx_hdr->reserved,
            rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
            rx_hdr->pkt_len);
        dev_kfree_skb_any(rx_skb);
        queue_rx();
        return;
    }
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
    int rc;
    struct tx_pkt_info *pkt;
    dma_addr_t dma_address;
    unsigned long flags;

    pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
    if (pkt == NULL) {
        pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
        rc = -ENOMEM;
        return rc;
    }

    dma_address = dma_map_single(NULL, data, len,
                    DMA_TO_DEVICE);
    if (!dma_address) {
        pr_err("%s: dma_map_single() failed\n", __func__);
        kfree(pkt);
        rc = -ENOMEM;
        return rc;
    }
    pkt->skb = (struct sk_buff *)(data);
    pkt->len = len;
    pkt->dma_address = dma_address;
    pkt->is_cmd = 1;
    set_tx_timestamp(pkt);
    INIT_WORK(&pkt->work, bam_mux_write_done);
    spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
    list_add_tail(&pkt->list_node, &bam_tx_pool);
    spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
    rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
                pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
    if (rc) {
        DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        list_del(&pkt->list_node);
        DBG_INC_TX_SPS_FAILURE_CNT();
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
        kfree(pkt);
    }

    ul_packet_written = 1;
    return rc;
}

static void bam_mux_write_done(struct work_struct *work)
{
    struct sk_buff *skb;
    struct bam_mux_hdr *hdr;
    struct tx_pkt_info *info;
    struct tx_pkt_info *info_expected;
    unsigned long event_data;
    unsigned long flags;

    if (in_global_reset)
        return;

    info = container_of(work, struct tx_pkt_info, work);

    spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
    info_expected = list_first_entry(&bam_tx_pool,
            struct tx_pkt_info, list_node);
    if (unlikely(info != info_expected)) {
        struct tx_pkt_info *errant_pkt;

        DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
                " list_node=%p, ts=%u.%09lu\n",
                __func__, bam_tx_pool.next, &info->list_node,
                info->ts_sec, info->ts_nsec
                );

        list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
            DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
                    &errant_pkt->list_node, errant_pkt->ts_sec,
                    errant_pkt->ts_nsec);
        }
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
        BUG();
    }
    list_del(&info->list_node);
    spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

    if (info->is_cmd) {
        kfree(info->skb);
        kfree(info);
        return;
    }
    skb = info->skb;
    kfree(info);
    hdr = (struct bam_mux_hdr *)skb->data;
    DBG_INC_WRITE_CNT(skb->data_len);
    event_data = (unsigned long)(skb);
    spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
    bam_ch[hdr->ch_id].num_tx_pkts--;
    spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
    if (bam_ch[hdr->ch_id].notify)
        bam_ch[hdr->ch_id].notify(
            bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
                        event_data);
    else
        dev_kfree_skb_any(skb);
}

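/*
 * Uplink write path: enforce the per-channel watermark, wake the BAM if it
 * has powered down, prepend a bam_mux_hdr (padding the payload to a 32-bit
 * boundary), then DMA-map the skb and queue it on the BAM TX pipe.
 */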
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
    int rc = 0;
    struct bam_mux_hdr *hdr;
    unsigned long flags;
    struct sk_buff *new_skb = NULL;
    dma_addr_t dma_address;
    struct tx_pkt_info *pkt;

    if (id >= BAM_DMUX_NUM_CHANNELS)
        return -EINVAL;
    if (!skb)
        return -EINVAL;
    if (!bam_mux_initialized)
        return -ENODEV;

    DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
    spin_lock_irqsave(&bam_ch[id].lock, flags);
    if (!bam_ch_is_open(id)) {
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);
        pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
        return -ENODEV;
    }

    if (bam_ch[id].use_wm &&
        (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);
        pr_err("%s: watermark exceeded: %d\n", __func__, id);
        return -EAGAIN;
    }
    spin_unlock_irqrestore(&bam_ch[id].lock, flags);

    read_lock(&ul_wakeup_lock);
    if (!bam_is_connected) {
        read_unlock(&ul_wakeup_lock);
        ul_wakeup();
        read_lock(&ul_wakeup_lock);
        notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
    }

    /* if the skb does not have any tailroom for padding,
       copy it into a new expanded skb */
    if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
        /* revisit, probably dev_alloc_skb and memcpy is more efficient */
        new_skb = skb_copy_expand(skb, skb_headroom(skb),
                      4 - (skb->len & 0x3), GFP_ATOMIC);
        if (new_skb == NULL) {
            pr_err("%s: cannot allocate skb\n", __func__);
            goto write_fail;
        }
        dev_kfree_skb_any(skb);
        skb = new_skb;
        DBG_INC_WRITE_CPY(skb->len);
    }

    hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

    /* caller should allocate for hdr and padding;
       hdr is fine, padding is tricky */
    hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
    hdr->cmd = BAM_MUX_HDR_CMD_DATA;
    hdr->reserved = 0;
    hdr->ch_id = id;
    hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
    if (skb->len & 0x3)
        skb_put(skb, 4 - (skb->len & 0x3));

    hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

    DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
        __func__, skb->data, skb->tail, skb->len,
        hdr->pkt_len, hdr->pad_len);

    pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
    if (pkt == NULL) {
        pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
        goto write_fail2;
    }

    dma_address = dma_map_single(NULL, skb->data, skb->len,
                    DMA_TO_DEVICE);
    if (!dma_address) {
        pr_err("%s: dma_map_single() failed\n", __func__);
        goto write_fail3;
    }
    pkt->skb = skb;
    pkt->dma_address = dma_address;
    pkt->is_cmd = 0;
    set_tx_timestamp(pkt);
    INIT_WORK(&pkt->work, bam_mux_write_done);
    spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
    list_add_tail(&pkt->list_node, &bam_tx_pool);
    spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
    rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
                pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
    if (rc) {
        DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        list_del(&pkt->list_node);
        DBG_INC_TX_SPS_FAILURE_CNT();
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
        kfree(pkt);
        if (new_skb)
            dev_kfree_skb_any(new_skb);
    } else {
        spin_lock_irqsave(&bam_ch[id].lock, flags);
        bam_ch[id].num_tx_pkts++;
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);
    }
    ul_packet_written = 1;
    read_unlock(&ul_wakeup_lock);
    return rc;

write_fail3:
    kfree(pkt);
write_fail2:
    if (new_skb)
        dev_kfree_skb_any(new_skb);
write_fail:
    read_unlock(&ul_wakeup_lock);
    return -ENOMEM;
}

int msm_bam_dmux_open(uint32_t id, void *priv,
            void (*notify)(void *, int, unsigned long))
{
    struct bam_mux_hdr *hdr;
    unsigned long flags;
    int rc = 0;

    DBG("%s: opening ch %d\n", __func__, id);
    if (!bam_mux_initialized) {
        DBG("%s: not initialized\n", __func__);
        return -ENODEV;
    }
    if (id >= BAM_DMUX_NUM_CHANNELS) {
        pr_err("%s: invalid channel id %d\n", __func__, id);
        return -EINVAL;
    }
    if (notify == NULL) {
        pr_err("%s: notify function is NULL\n", __func__);
        return -EINVAL;
    }

    hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
    if (hdr == NULL) {
        pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
        return -ENOMEM;
    }
    spin_lock_irqsave(&bam_ch[id].lock, flags);
    if (bam_ch_is_open(id)) {
        DBG("%s: Already opened %d\n", __func__, id);
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);
        kfree(hdr);
        goto open_done;
    }
    if (!bam_ch_is_remote_open(id)) {
        DBG("%s: Remote not open; ch: %d\n", __func__, id);
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);
        kfree(hdr);
        return -ENODEV;
    }

    bam_ch[id].notify = notify;
    bam_ch[id].priv = priv;
    bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
    bam_ch[id].num_tx_pkts = 0;
    bam_ch[id].use_wm = 0;
    spin_unlock_irqrestore(&bam_ch[id].lock, flags);

    read_lock(&ul_wakeup_lock);
    if (!bam_is_connected) {
        read_unlock(&ul_wakeup_lock);
        ul_wakeup();
        read_lock(&ul_wakeup_lock);
        notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
    }

    hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
    hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
    hdr->reserved = 0;
    hdr->ch_id = id;
    hdr->pkt_len = 0;
    hdr->pad_len = 0;

    rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
    read_unlock(&ul_wakeup_lock);

open_done:
    DBG("%s: opened ch %d\n", __func__, id);
    return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
    struct bam_mux_hdr *hdr;
    unsigned long flags;
    int rc;

    if (id >= BAM_DMUX_NUM_CHANNELS)
        return -EINVAL;
    DBG("%s: closing ch %d\n", __func__, id);
    if (!bam_mux_initialized)
        return -ENODEV;

    read_lock(&ul_wakeup_lock);
    if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
        read_unlock(&ul_wakeup_lock);
        ul_wakeup();
        read_lock(&ul_wakeup_lock);
        notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
    }

    spin_lock_irqsave(&bam_ch[id].lock, flags);
    bam_ch[id].notify = NULL;
    bam_ch[id].priv = NULL;
    bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
    spin_unlock_irqrestore(&bam_ch[id].lock, flags);

    if (bam_ch_is_in_reset(id)) {
        read_unlock(&ul_wakeup_lock);
        bam_ch[id].status &= ~BAM_CH_IN_RESET;
        return 0;
    }

    hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
    if (hdr == NULL) {
        pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
        read_unlock(&ul_wakeup_lock);
        return -ENOMEM;
    }
    hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
    hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
    hdr->reserved = 0;
    hdr->ch_id = id;
    hdr->pkt_len = 0;
    hdr->pad_len = 0;

    rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
    read_unlock(&ul_wakeup_lock);

    DBG("%s: closed ch %d\n", __func__, id);
    return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
    unsigned long flags;
    int ret;

    if (id >= BAM_DMUX_NUM_CHANNELS)
        return -EINVAL;

    spin_lock_irqsave(&bam_ch[id].lock, flags);
    bam_ch[id].use_wm = 1;
    ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
    DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
        id, bam_ch[id].num_tx_pkts, ret);
    if (!bam_ch_is_local_open(id)) {
        ret = -ENODEV;
        pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
    }
    spin_unlock_irqrestore(&bam_ch[id].lock, flags);

    return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
    int ret;

    if (id >= BAM_DMUX_NUM_CHANNELS)
        return -EINVAL;

    bam_ch[id].use_wm = 1;
    ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
    DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
        id, bam_ch[id].num_tx_pkts, ret);
    if (!bam_ch_is_local_open(id)) {
        ret = -ENODEV;
        pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
    }

    return ret;
}

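/*
 * Leave RX polling mode: re-register for EOT interrupts on the RX pipe and
 * drain any descriptors that completed while interrupts were still off.
 */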
static void rx_switch_to_interrupt_mode(void)
{
    struct sps_connect cur_rx_conn;
    struct sps_iovec iov;
    struct rx_pkt_info *info;
    int ret;

    /*
     * Attempt to enable interrupts - if this fails,
     * continue polling and we will retry later.
     */
    ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
    if (ret) {
        pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
        goto fail;
    }

    rx_register_event.options = SPS_O_EOT;
    ret = sps_register_event(bam_rx_pipe, &rx_register_event);
    if (ret) {
        pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
        goto fail;
    }

    cur_rx_conn.options = SPS_O_AUTO_ENABLE |
        SPS_O_EOT | SPS_O_ACK_TRANSFERS;
    ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
    if (ret) {
        pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
        goto fail;
    }
    polling_mode = 0;
    release_wakelock();

    /* handle any rx packets before interrupt was enabled */
    while (bam_connection_is_active && !polling_mode) {
        ret = sps_get_iovec(bam_rx_pipe, &iov);
        if (ret) {
            pr_err("%s: sps_get_iovec failed %d\n",
                    __func__, ret);
            break;
        }
        if (iov.addr == 0)
            break;

        mutex_lock(&bam_rx_pool_mutexlock);
        if (unlikely(list_empty(&bam_rx_pool))) {
            mutex_unlock(&bam_rx_pool_mutexlock);
            continue;
        }
        info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
                    list_node);
        list_del(&info->list_node);
        mutex_unlock(&bam_rx_pool_mutexlock);
        handle_bam_mux_cmd(&info->work);
    }
    return;

fail:
    pr_err("%s: reverting to polling\n", __func__);
    queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}

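/*
 * RX polling loop: repeatedly drain completed RX descriptors, sleeping
 * roughly 1 ms between passes, and fall back to interrupt mode after
 * POLLING_INACTIVITY consecutive idle cycles.
 */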
static void rx_timer_work_func(struct work_struct *work)
{
    struct sps_iovec iov;
    struct rx_pkt_info *info;
    int inactive_cycles = 0;
    int ret;

    while (bam_connection_is_active) { /* timer loop */
        ++inactive_cycles;
        while (bam_connection_is_active) { /* deplete queue loop */
            if (in_global_reset)
                return;

            ret = sps_get_iovec(bam_rx_pipe, &iov);
            if (ret) {
                pr_err("%s: sps_get_iovec failed %d\n",
                        __func__, ret);
                break;
            }
            if (iov.addr == 0)
                break;
            inactive_cycles = 0;
            mutex_lock(&bam_rx_pool_mutexlock);
            if (unlikely(list_empty(&bam_rx_pool))) {
                mutex_unlock(&bam_rx_pool_mutexlock);
                continue;
            }
            info = list_first_entry(&bam_rx_pool,
                    struct rx_pkt_info, list_node);
            list_del(&info->list_node);
            mutex_unlock(&bam_rx_pool_mutexlock);
            handle_bam_mux_cmd(&info->work);
        }

        if (inactive_cycles == POLLING_INACTIVITY) {
            rx_switch_to_interrupt_mode();
            break;
        }

        usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
    }
}

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
    struct tx_pkt_info *pkt;

    DBG("%s: event %d notified\n", __func__, notify->event_id);

    if (in_global_reset)
        return;

    switch (notify->event_id) {
    case SPS_EVENT_EOT:
        pkt = notify->data.transfer.user;
        if (!pkt->is_cmd)
            dma_unmap_single(NULL, pkt->dma_address,
                        pkt->skb->len,
                        DMA_TO_DEVICE);
        else
            dma_unmap_single(NULL, pkt->dma_address,
                        pkt->len,
                        DMA_TO_DEVICE);
        queue_work(bam_mux_tx_workqueue, &pkt->work);
        break;
    default:
        pr_err("%s: received unexpected event id %d\n", __func__,
            notify->event_id);
    }
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
    int ret;
    struct sps_connect cur_rx_conn;

    DBG("%s: event %d notified\n", __func__, notify->event_id);

    if (in_global_reset)
        return;

    switch (notify->event_id) {
    case SPS_EVENT_EOT:
        /* attempt to disable interrupts in this pipe */
        if (!polling_mode) {
            ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
            if (ret) {
                pr_err("%s: sps_get_config() failed %d, interrupts"
                    " not disabled\n", __func__, ret);
                break;
            }
            cur_rx_conn.options = SPS_O_AUTO_ENABLE |
                SPS_O_ACK_TRANSFERS | SPS_O_POLL;
            ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
            if (ret) {
                pr_err("%s: sps_set_config() failed %d, interrupts"
                    " not disabled\n", __func__, ret);
                break;
            }
            grab_wakelock();
            polling_mode = 1;
            queue_work(bam_mux_rx_workqueue, &rx_timer_work);
        }
        break;
    default:
        pr_err("%s: received unexpected event id %d\n", __func__,
            notify->event_id);
    }
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
    int i = 0;
    int j;

    for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
        i += scnprintf(buf + i, max - i,
            "ch%02d local open=%s remote open=%s\n",
            j, bam_ch_is_local_open(j) ? "Y" : "N",
            bam_ch_is_remote_open(j) ? "Y" : "N");
    }

    return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
    struct list_head *p;
    unsigned long flags;
    int n = 0;

    spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
    __list_for_each(p, &bam_tx_pool) {
        ++n;
    }
    spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

    return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
    int i = 0;

    i += scnprintf(buf + i, max - i,
            "skb copy cnt: %u\n"
            "skb copy bytes: %u\n"
            "sps tx failures: %u\n",
            bam_dmux_write_cpy_cnt,
            bam_dmux_write_cpy_bytes,
            bam_dmux_tx_sps_failure_cnt
            );

    return i;
}

static int debug_log(char *buff, int max, loff_t *ppos)
{
    unsigned long flags;
    int i = 0;

    if (bam_dmux_state_logging_disabled) {
        i += scnprintf(buff - i, max - i, "Logging disabled\n");
        return i;
    }

    if (*ppos == 0) {
        i += scnprintf(buff - i, max - i,
            "<DMUX> timestamp FLAGS [Message]\n"
            "FLAGS:\n"
            "\tD: 1 = Power collapse disabled\n"
            "\tR: 1 = in global reset\n"
            "\tP: 1 = BAM is powered up\n"
            "\tA: 1 = BAM initialized and ready for data\n"
            "\n"
            "\tV: 1 = Uplink vote for power\n"
            "\tU: 1 = Uplink active\n"
            "\tW: 1 = Uplink Wait-for-ack\n"
            "\tA: 1 = Uplink ACK received\n"
            );
        buff += i;
    }

    spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
    while (kfifo_len(&bam_dmux_state_log)
            && (i + LOG_MESSAGE_MAX_SIZE) < max) {
        int k_len;
        k_len = kfifo_out(&bam_dmux_state_log,
                buff, LOG_MESSAGE_MAX_SIZE);
        if (k_len != LOG_MESSAGE_MAX_SIZE) {
            pr_err("%s: retrieve failure %d\n", __func__, k_len);
            break;
        }

        /* keep non-null portion of string and add line break */
        k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
        buff += k_len;
        i += k_len;
        if (k_len && *(buff - 1) != '\n') {
            *buff++ = '\n';
            ++i;
        }
    }
    spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);

    return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
              size_t count, loff_t *ppos)
{
    int (*fill)(char *buf, int max) = file->private_data;
    int bsize = fill(debug_buffer, DEBUG_BUFMAX);
    return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static ssize_t debug_read_multiple(struct file *file, char __user *buff,
                   size_t count, loff_t *ppos)
{
    int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
    char *buffer;
    int bsize;

    buffer = kmalloc(count, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    bsize = util_func(buffer, count, ppos);

    if (bsize >= 0) {
        if (copy_to_user(buff, buffer, bsize)) {
            kfree(buffer);
            return -EFAULT;
        }
        *ppos += bsize;
    }
    kfree(buffer);
    return bsize;
}

static int debug_open(struct inode *inode, struct file *file)
{
    file->private_data = inode->i_private;
    return 0;
}


static const struct file_operations debug_ops = {
    .read = debug_read,
    .open = debug_open,
};

static const struct file_operations debug_ops_multiple = {
    .read = debug_read_multiple,
    .open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
             struct dentry *dent,
             int (*fill)(char *buf, int max))
{
    debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

static void notify_all(int event, unsigned long data)
{
    int i;

    for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
        if (bam_ch_is_open(i))
            bam_ch[i].notify(bam_ch[i].priv, event, data);
    }
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
    read_lock(&ul_wakeup_lock);
    if (!bam_is_connected) {
        read_unlock(&ul_wakeup_lock);
        ul_wakeup();
        read_lock(&ul_wakeup_lock);
        ul_packet_written = 1;
        notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
    }
    read_unlock(&ul_wakeup_lock);
}

void msm_bam_dmux_kickoff_ul_wakeup(void)
{
    queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
}

static void power_vote(int vote)
{
    bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
            bam_dmux_uplink_vote, vote);

    if (bam_dmux_uplink_vote == vote)
        bam_dmux_log("%s: warning - duplicate power vote\n", __func__);

    bam_dmux_uplink_vote = vote;
    if (vote)
        smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
    else
        smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
}

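/*
 * Uplink inactivity timer: if no uplink packet was written during the last
 * UL_TIMEOUT_DELAY interval, drop the A2 power vote (or just the dfab and
 * wakelock references when A2 power collapse is disabled) and notify
 * clients that the uplink is disconnected.
 */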
static void ul_timeout(struct work_struct *work)
{
    unsigned long flags;
    int ret;

    if (in_global_reset)
        return;
    ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
    if (!ret) { /* failed to grab lock, reschedule and bail */
        schedule_delayed_work(&ul_timeout_work,
                msecs_to_jiffies(UL_TIMEOUT_DELAY));
        return;
    }
    if (ul_packet_written) {
        bam_dmux_log("%s: packet written\n", __func__);
        ul_packet_written = 0;
        schedule_delayed_work(&ul_timeout_work,
                msecs_to_jiffies(UL_TIMEOUT_DELAY));
    } else {
        bam_dmux_log("%s: powerdown\n", __func__);
        verify_tx_queue_is_empty(__func__);

        if (a2_pc_disabled) {
            wait_for_dfab = 1;
            INIT_COMPLETION(dfab_unvote_completion);
            release_wakelock();
        } else {
            wait_for_ack = 1;
            INIT_COMPLETION(ul_wakeup_ack_completion);
            power_vote(0);
        }
        bam_is_connected = 0;
        notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
    }
    write_unlock_irqrestore(&ul_wakeup_lock, flags);
    if (a2_pc_disabled && wait_for_dfab) {
        unvote_dfab();
        complete_all(&dfab_unvote_completion);
        wait_for_dfab = 0;
    }
}

static void ul_wakeup(void)
{
    int ret;
    static int called_before;

    mutex_lock(&wakeup_lock);
    if (bam_is_connected) { /* bam got connected before lock grabbed */
        bam_dmux_log("%s Already awake\n", __func__);
        mutex_unlock(&wakeup_lock);
        return;
    }

    if (a2_pc_disabled) {
        /*
         * don't grab the wakelock the first time because it is
         * already grabbed when a2 powers on
         */
        if (likely(called_before))
            grab_wakelock();
        else
            called_before = 1;
        if (wait_for_dfab) {
            ret = wait_for_completion_interruptible_timeout(
                    &dfab_unvote_completion, HZ);
            BUG_ON(ret == 0);
        }
        vote_dfab();
        schedule_delayed_work(&ul_timeout_work,
                msecs_to_jiffies(UL_TIMEOUT_DELAY));
        bam_is_connected = 1;
        mutex_unlock(&wakeup_lock);
        return;
    }

    /*
     * must wait for the previous power down request to have been acked
     * chances are it already came in and this will just fall through
     * instead of waiting
     */
    if (wait_for_ack) {
        bam_dmux_log("%s waiting for previous ack\n", __func__);
        ret = wait_for_completion_interruptible_timeout(
                &ul_wakeup_ack_completion, HZ);
        BUG_ON(ret == 0);
        wait_for_ack = 0;
    }
    INIT_COMPLETION(ul_wakeup_ack_completion);
    power_vote(1);
    bam_dmux_log("%s waiting for wakeup ack\n", __func__);
    ret = wait_for_completion_interruptible_timeout(
            &ul_wakeup_ack_completion, HZ);
    BUG_ON(ret == 0);
    bam_dmux_log("%s waiting completion\n", __func__);
    ret = wait_for_completion_interruptible_timeout(
            &bam_connection_completion, HZ);
    BUG_ON(ret == 0);

    bam_is_connected = 1;
    bam_dmux_log("%s complete\n", __func__);
    schedule_delayed_work(&ul_timeout_work,
            msecs_to_jiffies(UL_TIMEOUT_DELAY));
    mutex_unlock(&wakeup_lock);
}

static void reconnect_to_bam(void)
{
    int i;

    in_global_reset = 0;
    vote_dfab();
    i = sps_device_reset(a2_device_handle);
    if (i)
        pr_err("%s: device reset failed rc = %d\n", __func__, i);
    i = sps_connect(bam_tx_pipe, &tx_connection);
    if (i)
        pr_err("%s: tx connection failed rc = %d\n", __func__, i);
    i = sps_connect(bam_rx_pipe, &rx_connection);
    if (i)
        pr_err("%s: rx connection failed rc = %d\n", __func__, i);
    i = sps_register_event(bam_tx_pipe, &tx_register_event);
    if (i)
        pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
    i = sps_register_event(bam_rx_pipe, &rx_register_event);
    if (i)
        pr_err("%s: rx event reg failed rc = %d\n", __func__, i);

    bam_connection_is_active = 1;

    if (polling_mode)
        rx_switch_to_interrupt_mode();

    for (i = 0; i < NUM_BUFFERS; ++i)
        queue_rx();

    toggle_apps_ack();
    complete_all(&bam_connection_completion);
}

static void disconnect_to_bam(void)
{
    struct list_head *node;
    struct rx_pkt_info *info;

    bam_connection_is_active = 0;
    INIT_COMPLETION(bam_connection_completion);
    sps_disconnect(bam_tx_pipe);
    sps_disconnect(bam_rx_pipe);
    unvote_dfab();
    __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
    __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);

    mutex_lock(&bam_rx_pool_mutexlock);
    while (!list_empty(&bam_rx_pool)) {
        node = bam_rx_pool.next;
        list_del(node);
        info = container_of(node, struct rx_pkt_info, list_node);
        dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
                            DMA_FROM_DEVICE);
        dev_kfree_skb_any(info->skb);
        kfree(info);
    }
    mutex_unlock(&bam_rx_pool_mutexlock);

    verify_tx_queue_is_empty(__func__);
}

1453static void vote_dfab(void)
1454{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001455 int rc;
1456
Eric Holmberg006057d2012-01-11 10:10:42 -07001457 bam_dmux_log("%s\n", __func__);
1458 mutex_lock(&dfab_status_lock);
1459 if (dfab_is_on) {
1460 bam_dmux_log("%s: dfab is already on\n", __func__);
1461 mutex_unlock(&dfab_status_lock);
1462 return;
1463 }
Jeff Hugoca0caa82011-12-05 16:05:23 -07001464 rc = clk_enable(dfab_clk);
1465 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001466 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
1467 dfab_is_on = 1;
1468 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001469}
1470
1471static void unvote_dfab(void)
1472{
Eric Holmberg006057d2012-01-11 10:10:42 -07001473 bam_dmux_log("%s\n", __func__);
1474 mutex_lock(&dfab_status_lock);
1475 if (!dfab_is_on) {
1476 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1477 dump_stack();
1478 mutex_unlock(&dfab_status_lock);
1479 return;
1480 }
Jeff Hugoca0caa82011-12-05 16:05:23 -07001481 clk_disable(dfab_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001482 dfab_is_on = 0;
1483 mutex_unlock(&dfab_status_lock);
1484}
1485
1486/* reference counting wrapper around wakelock */
1487static void grab_wakelock(void)
1488{
1489 unsigned long flags;
1490
1491 spin_lock_irqsave(&wakelock_reference_lock, flags);
1492 bam_dmux_log("%s: ref count = %d\n", __func__,
1493 wakelock_reference_count);
1494 if (wakelock_reference_count == 0)
1495 wake_lock(&bam_wakelock);
1496 ++wakelock_reference_count;
1497 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1498}
1499
1500static void release_wakelock(void)
1501{
1502 unsigned long flags;
1503
1504 spin_lock_irqsave(&wakelock_reference_lock, flags);
1505 if (wakelock_reference_count == 0) {
1506 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1507 dump_stack();
1508 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1509 return;
1510 }
1511 bam_dmux_log("%s: ref count = %d\n", __func__,
1512 wakelock_reference_count);
1513 --wakelock_reference_count;
1514 if (wakelock_reference_count == 0)
1515 wake_unlock(&bam_wakelock);
1516 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001517}
1518
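/*
 * Descriptive comment added for clarity, summarizing the restart handling
 * below.
 *
 * restart_notifier_cb() - modem subsystem restart hook
 *
 * On SUBSYS_AFTER_SHUTDOWN this enters global reset: remotely opened
 * channels are marked closed (their platform devices re-allocated),
 * locally opened channels are flagged BAM_CH_IN_RESET, everything still
 * sitting in bam_tx_pool is unmapped and freed, and the apps power vote
 * is dropped.
 */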
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001519static int restart_notifier_cb(struct notifier_block *this,
1520 unsigned long code,
1521 void *data)
1522{
1523 int i;
1524 struct list_head *node;
1525 struct tx_pkt_info *info;
1526 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001527 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001528
1529 if (code != SUBSYS_AFTER_SHUTDOWN)
1530 return NOTIFY_DONE;
1531
Eric Holmberg878923a2012-01-10 14:28:19 -07001532 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001533 in_global_reset = 1;
Eric Holmberg006057d2012-01-11 10:10:42 -07001534 a2_pc_disabled = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001535 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1536 temp_remote_status = bam_ch_is_remote_open(i);
1537 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001538 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001539 if (bam_ch_is_local_open(i))
1540 bam_ch[i].status |= BAM_CH_IN_RESET;
1541 if (temp_remote_status) {
1542 platform_device_unregister(bam_ch[i].pdev);
1543 bam_ch[i].pdev = platform_device_alloc(
1544 bam_ch[i].name, 2);
1545 }
1546 }
1547 /*cleanup UL*/
Jeff Hugo626303bf2011-11-21 11:43:28 -07001548 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001549 while (!list_empty(&bam_tx_pool)) {
1550 node = bam_tx_pool.next;
1551 list_del(node);
1552 info = container_of(node, struct tx_pkt_info,
1553 list_node);
1554 if (!info->is_cmd) {
1555 dma_unmap_single(NULL, info->dma_address,
1556 info->skb->len,
1557 DMA_TO_DEVICE);
1558 dev_kfree_skb_any(info->skb);
1559 } else {
1560 dma_unmap_single(NULL, info->dma_address,
1561 info->len,
1562 DMA_TO_DEVICE);
1563 kfree(info->skb);
1564 }
1565 kfree(info);
1566 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001567 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg878923a2012-01-10 14:28:19 -07001568 power_vote(0);
1569 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001570 return NOTIFY_DONE;
1571}
1572
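/*
 * Descriptive comment added for clarity, summarizing the bring-up sequence
 * below.
 *
 * bam_init() - one-time bring-up of the A2 BAM
 *
 * Maps the A2 BAM register space and registers it with the SPS driver,
 * then sets up the TX connection (system memory -> BAM pipe 4) and the RX
 * connection (BAM pipe 5 -> system memory) with 2k descriptor FIFOs,
 * registers EOT event callbacks for both pipes, queues the initial RX
 * buffers and marks the connection active. Failures unwind through the
 * goto chain at the bottom of the function.
 */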
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001573static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574{
1575 u32 h;
1576 dma_addr_t dma_addr;
1577 int ret;
1578 void *a2_virt_addr;
1579 int i;
1580
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001581 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 /* init BAM */
1583 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1584 if (!a2_virt_addr) {
1585 pr_err("%s: ioremap failed\n", __func__);
1586 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001587 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 }
1589 a2_props.phys_addr = A2_PHYS_BASE;
1590 a2_props.virt_addr = a2_virt_addr;
1591 a2_props.virt_size = A2_PHYS_SIZE;
1592 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001593 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594 a2_props.num_pipes = A2_NUM_PIPES;
1595 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001596 if (cpu_is_msm9615())
1597 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001598 /* need to free on tear down */
1599 ret = sps_register_bam_device(&a2_props, &h);
1600 if (ret < 0) {
1601 pr_err("%s: register bam error %d\n", __func__, ret);
1602 goto register_bam_failed;
1603 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001604 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001605
1606 bam_tx_pipe = sps_alloc_endpoint();
1607 if (bam_tx_pipe == NULL) {
1608 pr_err("%s: tx alloc endpoint failed\n", __func__);
1609 ret = -ENOMEM;
1610 goto register_bam_failed;
1611 }
1612 ret = sps_get_config(bam_tx_pipe, &tx_connection);
1613 if (ret) {
1614 pr_err("%s: tx get config failed %d\n", __func__, ret);
1615 goto tx_get_config_failed;
1616 }
1617
1618 tx_connection.source = SPS_DEV_HANDLE_MEM;
1619 tx_connection.src_pipe_index = 0;
1620 tx_connection.destination = h;
1621 tx_connection.dest_pipe_index = 4;
1622 tx_connection.mode = SPS_MODE_DEST;
1623 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
1624 tx_desc_mem_buf.size = 0x800; /* 2k */
1625 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
1626 &dma_addr, 0);
1627 if (tx_desc_mem_buf.base == NULL) {
1628 pr_err("%s: tx memory alloc failed\n", __func__);
1629 ret = -ENOMEM;
1630 goto tx_mem_failed;
1631 }
1632 tx_desc_mem_buf.phys_base = dma_addr;
1633 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
1634 tx_connection.desc = tx_desc_mem_buf;
1635 tx_connection.event_thresh = 0x10;
1636
1637 ret = sps_connect(bam_tx_pipe, &tx_connection);
1638 if (ret < 0) {
1639 pr_err("%s: tx connect error %d\n", __func__, ret);
1640 goto tx_connect_failed;
1641 }
1642
1643 bam_rx_pipe = sps_alloc_endpoint();
1644 if (bam_rx_pipe == NULL) {
1645 pr_err("%s: rx alloc endpoint failed\n", __func__);
1646 ret = -ENOMEM;
1647 goto tx_connect_failed;
1648 }
1649 ret = sps_get_config(bam_rx_pipe, &rx_connection);
1650 if (ret) {
1651 pr_err("%s: rx get config failed %d\n", __func__, ret);
1652 goto rx_get_config_failed;
1653 }
1654
1655 rx_connection.source = h;
1656 rx_connection.src_pipe_index = 5;
1657 rx_connection.destination = SPS_DEV_HANDLE_MEM;
1658 rx_connection.dest_pipe_index = 1;
1659 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06001660 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
1661 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 rx_desc_mem_buf.size = 0x800; /* 2k */
1663 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
1664 &dma_addr, 0);
1665 if (rx_desc_mem_buf.base == NULL) {
1666 pr_err("%s: rx memory alloc failed\n", __func__);
1667 ret = -ENOMEM;
1668 goto rx_mem_failed;
1669 }
1670 rx_desc_mem_buf.phys_base = dma_addr;
1671 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
1672 rx_connection.desc = rx_desc_mem_buf;
1673 rx_connection.event_thresh = 0x10;
1674
1675 ret = sps_connect(bam_rx_pipe, &rx_connection);
1676 if (ret < 0) {
1677 pr_err("%s: rx connect error %d\n", __func__, ret);
1678 goto rx_connect_failed;
1679 }
1680
1681 tx_register_event.options = SPS_O_EOT;
1682 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
1683 tx_register_event.xfer_done = NULL;
1684 tx_register_event.callback = bam_mux_tx_notify;
1685 tx_register_event.user = NULL;
1686 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
1687 if (ret < 0) {
1688 pr_err("%s: tx register event error %d\n", __func__, ret);
1689 goto rx_event_reg_failed;
1690 }
1691
Jeff Hugo33dbc002011-08-25 15:52:53 -06001692 rx_register_event.options = SPS_O_EOT;
1693 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
1694 rx_register_event.xfer_done = NULL;
1695 rx_register_event.callback = bam_mux_rx_notify;
1696 rx_register_event.user = NULL;
1697 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
1698 if (ret < 0) {
1699 		pr_err("%s: rx register event error %d\n", __func__, ret);
1700 goto rx_event_reg_failed;
1701 }
1702
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 bam_mux_initialized = 1;
1704 for (i = 0; i < NUM_BUFFERS; ++i)
1705 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001706 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001707 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001708 complete_all(&bam_connection_completion);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001709 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001710
1711rx_event_reg_failed:
1712 sps_disconnect(bam_rx_pipe);
1713rx_connect_failed:
1714 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
1715 rx_desc_mem_buf.phys_base);
1716rx_mem_failed:
1717 sps_disconnect(bam_tx_pipe);
1718rx_get_config_failed:
1719 sps_free_endpoint(bam_rx_pipe);
1720tx_connect_failed:
1721 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
1722 tx_desc_mem_buf.phys_base);
1723tx_get_config_failed:
1724 sps_free_endpoint(bam_tx_pipe);
1725tx_mem_failed:
1726 sps_deregister_bam_device(h);
1727register_bam_failed:
Jeff Hugo994a92d2012-01-05 13:25:21 -07001728ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001729 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001730 return ret;
1731}
1732
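/*
 * Descriptive comment added for clarity, summarizing the fallback path below.
 *
 * bam_init_fallback() - minimal setup used when bam_init() fails: drops the
 * DFAB vote taken for the failed attempt and registers the A2 BAM with SPS
 * (saving a2_device_handle) without configuring the data pipes.
 */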
1733static int bam_init_fallback(void)
1734{
1735 u32 h;
1736 int ret;
1737 void *a2_virt_addr;
1738
1739 unvote_dfab();
1740 /* init BAM */
1741 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1742 if (!a2_virt_addr) {
1743 pr_err("%s: ioremap failed\n", __func__);
1744 ret = -ENOMEM;
1745 goto ioremap_failed;
1746 }
1747 a2_props.phys_addr = A2_PHYS_BASE;
1748 a2_props.virt_addr = a2_virt_addr;
1749 a2_props.virt_size = A2_PHYS_SIZE;
1750 a2_props.irq = A2_BAM_IRQ;
1751 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
1752 a2_props.num_pipes = A2_NUM_PIPES;
1753 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
1754 if (cpu_is_msm9615())
1755 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
1756 ret = sps_register_bam_device(&a2_props, &h);
1757 if (ret < 0) {
1758 pr_err("%s: register bam error %d\n", __func__, ret);
1759 goto register_bam_failed;
1760 }
1761 a2_device_handle = h;
1762
1763 return 0;
1764
1765register_bam_failed:
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001766ioremap_failed:
1767 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001768}
Jeff Hugoade1f842011-08-03 15:53:59 -06001769
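/*
 * Descriptive comment added for clarity.
 *
 * toggle_apps_ack() - acknowledge an A2 power-control request by flipping
 * the SMSM_A2_POWER_CONTROL_ACK bit in the apps SMSM state.
 */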
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001770static void toggle_apps_ack(void)
1771{
1772 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07001773
1774 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
1775 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001776 smsm_change_state(SMSM_APPS_STATE,
1777 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
1778 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
1779 clear_bit = ~clear_bit;
1780}
1781
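/*
 * Descriptive comment added for clarity, summarizing the state handling
 * below.
 *
 * bam_dmux_smsm_cb() - handler for SMSM_A2_POWER_CONTROL state changes
 *
 * The first time the modem asserts the bit this performs the one-time
 * bam_init() (falling back to bam_init_fallback() on failure). Once
 * initialized, the bit going high reconnects to the BAM and the bit going
 * low disconnects, with a wakelock held while the link is powered.
 */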
Jeff Hugoade1f842011-08-03 15:53:59 -06001782static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
1783{
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001784 int ret = 0;
Eric Holmberg878923a2012-01-10 14:28:19 -07001785
1786 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
1787 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
1788 new_state);
1789
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001790 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001791 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001792 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001793 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001794 } else if (bam_mux_initialized &&
1795 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001796 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001797 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07001798 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001799 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001800 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001801 grab_wakelock();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001802 ret = bam_init();
1803 if (ret) {
1804 ret = bam_init_fallback();
1805 if (ret)
1806 				pr_err("%s: bam init fallback failed: %d\n",
1807 __func__, ret);
1808 }
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001809 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07001810 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06001811 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001812 }
Jeff Hugoade1f842011-08-03 15:53:59 -06001813
1814}
1815
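/*
 * Descriptive comment added for clarity.
 *
 * bam_dmux_smsm_ack_cb() - the modem acknowledged our wakeup request;
 * wake anyone blocked on ul_wakeup_ack_completion.
 */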
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001816static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
1817 uint32_t new_state)
1818{
Eric Holmberg878923a2012-01-10 14:28:19 -07001819 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
1820 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001821 complete_all(&ul_wakeup_ack_completion);
1822}
1823
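/*
 * Descriptive comment added for clarity, summarizing the probe sequence
 * below.
 *
 * bam_dmux_probe() - platform driver probe
 *
 * Acquires the DFAB clock and sets its rate, creates the single-threaded
 * RX/TX workqueues, allocates one platform device per logical channel,
 * registers the SMSM power-control and power-control-ack callbacks, and
 * invokes the power-control callback immediately if the modem has already
 * asserted SMSM_A2_POWER_CONTROL.
 */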
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824static int bam_dmux_probe(struct platform_device *pdev)
1825{
1826 int rc;
1827
1828 DBG("%s probe called\n", __func__);
1829 if (bam_mux_initialized)
1830 return 0;
1831
Stephen Boyd1c51a492011-10-26 12:11:47 -07001832 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001833 if (IS_ERR(dfab_clk)) {
1834 pr_err("%s: did not get dfab clock\n", __func__);
1835 return -EFAULT;
1836 }
1837
1838 rc = clk_set_rate(dfab_clk, 64000000);
1839 if (rc)
1840 pr_err("%s: unable to set dfab clock rate\n", __func__);
1841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
1843 if (!bam_mux_rx_workqueue)
1844 return -ENOMEM;
1845
1846 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
1847 if (!bam_mux_tx_workqueue) {
1848 destroy_workqueue(bam_mux_rx_workqueue);
1849 return -ENOMEM;
1850 }
1851
Jeff Hugo7960abd2011-08-02 15:39:38 -06001852 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001853 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06001854 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
1855 "bam_dmux_ch_%d", rc);
1856 /* bus 2, ie a2 stream 2 */
1857 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
1858 if (!bam_ch[rc].pdev) {
1859 pr_err("%s: platform device alloc failed\n", __func__);
1860 destroy_workqueue(bam_mux_rx_workqueue);
1861 destroy_workqueue(bam_mux_tx_workqueue);
1862 return -ENOMEM;
1863 }
1864 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001865
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001866 init_completion(&ul_wakeup_ack_completion);
1867 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07001868 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001869 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07001870 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001871
Jeff Hugoade1f842011-08-03 15:53:59 -06001872 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
1873 bam_dmux_smsm_cb, NULL);
1874
1875 if (rc) {
1876 destroy_workqueue(bam_mux_rx_workqueue);
1877 destroy_workqueue(bam_mux_tx_workqueue);
1878 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
1879 return -ENOMEM;
1880 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001882 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
1883 bam_dmux_smsm_ack_cb, NULL);
1884
1885 if (rc) {
1886 destroy_workqueue(bam_mux_rx_workqueue);
1887 destroy_workqueue(bam_mux_tx_workqueue);
1888 smsm_state_cb_deregister(SMSM_MODEM_STATE,
1889 SMSM_A2_POWER_CONTROL,
1890 bam_dmux_smsm_cb, NULL);
1891 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
1892 rc);
1893 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
1894 platform_device_put(bam_ch[rc].pdev);
1895 return -ENOMEM;
1896 }
1897
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07001898 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
1899 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
1900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901 return 0;
1902}
1903
1904static struct platform_driver bam_dmux_driver = {
1905 .probe = bam_dmux_probe,
1906 .driver = {
1907 .name = "BAM_RMNT",
1908 .owner = THIS_MODULE,
1909 },
1910};
1911
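/*
 * Descriptive comment added for clarity.
 *
 * bam_dmux_init() - module init: creates the debugfs nodes and the state
 * log fifo, registers for modem restart notifications and registers the
 * platform driver. Runs at late_initcall so that SMD/SMSM is initialized
 * first.
 */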
1912static int __init bam_dmux_init(void)
1913{
Eric Holmberg878923a2012-01-10 14:28:19 -07001914 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915#ifdef CONFIG_DEBUG_FS
1916 struct dentry *dent;
1917
1918 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001919 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001920 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001921 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
1922 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberg878923a2012-01-10 14:28:19 -07001923 debugfs_create_file("log", 0444, dent, debug_log,
1924 &debug_ops_multiple);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001925 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001926#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07001927 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
1928 if (ret) {
1929 pr_err("%s: failed to allocate log %d\n", __func__, ret);
1930 bam_dmux_state_logging_disabled = 1;
1931 }
1932
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001933 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934 return platform_driver_register(&bam_dmux_driver);
1935}
1936
Jeff Hugoade1f842011-08-03 15:53:59 -06001937late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938MODULE_DESCRIPTION("MSM BAM DMUX");
1939MODULE_LICENSE("GPL v2");