Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * BAM DMUX module.
16 */
17
18#define DEBUG
19
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/debugfs.h>
Jeff Hugoaab7ebc2011-09-07 16:46:04 -060027#include <linux/clk.h>
Jeff Hugoae3a85e2011-12-02 17:10:18 -070028#include <linux/wakelock.h>
Eric Holmberg878923a2012-01-10 14:28:19 -070029#include <linux/kfifo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
31#include <mach/sps.h>
32#include <mach/bam_dmux.h>
Jeff Hugoade1f842011-08-03 15:53:59 -060033#include <mach/msm_smsm.h>
Jeff Hugo6e7a92a2011-10-24 05:25:13 -060034#include <mach/subsystem_notif.h>
Jeff Hugo75913c82011-12-05 15:59:01 -070035#include <mach/socinfo.h>
Jeff Hugo4838f412012-01-20 11:19:37 -070036#include <mach/subsystem_restart.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
38#define BAM_CH_LOCAL_OPEN 0x1
39#define BAM_CH_REMOTE_OPEN 0x2
Jeff Hugo6e7a92a2011-10-24 05:25:13 -060040#define BAM_CH_IN_RESET 0x4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041
42#define BAM_MUX_HDR_MAGIC_NO 0x33fc
43
Eric Holmberg006057d2012-01-11 10:10:42 -070044#define BAM_MUX_HDR_CMD_DATA 0
45#define BAM_MUX_HDR_CMD_OPEN 1
46#define BAM_MUX_HDR_CMD_CLOSE 2
47#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
48#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049
Jeff Hugo949080a2011-08-30 11:58:56 -060050#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
51#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
52#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -070054#define LOW_WATERMARK 2
55#define HIGH_WATERMARK 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056
57static int msm_bam_dmux_debug_enable;
58module_param_named(debug_enable, msm_bam_dmux_debug_enable,
59 int, S_IRUGO | S_IWUSR | S_IWGRP);
60
61#if defined(DEBUG)
62static uint32_t bam_dmux_read_cnt;
63static uint32_t bam_dmux_write_cnt;
64static uint32_t bam_dmux_write_cpy_cnt;
65static uint32_t bam_dmux_write_cpy_bytes;
Eric Holmberg2fddbcd2011-11-28 18:25:57 -070066static uint32_t bam_dmux_tx_sps_failure_cnt;
Eric Holmberg6074aba2012-01-18 17:59:44 -070067static uint32_t bam_dmux_tx_stall_cnt;
Eric Holmberg1f1255d2012-02-22 13:37:21 -070068static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
69static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
70static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72#define DBG(x...) do { \
73 if (msm_bam_dmux_debug_enable) \
74 pr_debug(x); \
75 } while (0)
76
77#define DBG_INC_READ_CNT(x) do { \
78 bam_dmux_read_cnt += (x); \
79 if (msm_bam_dmux_debug_enable) \
80 pr_debug("%s: total read bytes %u\n", \
81 __func__, bam_dmux_read_cnt); \
82 } while (0)
83
84#define DBG_INC_WRITE_CNT(x) do { \
85 bam_dmux_write_cnt += (x); \
86 if (msm_bam_dmux_debug_enable) \
87 pr_debug("%s: total written bytes %u\n", \
88 __func__, bam_dmux_write_cnt); \
89 } while (0)
90
91#define DBG_INC_WRITE_CPY(x) do { \
92 bam_dmux_write_cpy_bytes += (x); \
93 bam_dmux_write_cpy_cnt++; \
94 if (msm_bam_dmux_debug_enable) \
95 pr_debug("%s: total write copy cnt %u, bytes %u\n", \
96 __func__, bam_dmux_write_cpy_cnt, \
97 bam_dmux_write_cpy_bytes); \
98 } while (0)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -070099
100#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
101 bam_dmux_tx_sps_failure_cnt++; \
102} while (0)
103
Eric Holmberg6074aba2012-01-18 17:59:44 -0700104#define DBG_INC_TX_STALL_CNT() do { \
105 bam_dmux_tx_stall_cnt++; \
106} while (0)
107
Eric Holmberg1f1255d2012-02-22 13:37:21 -0700108#define DBG_INC_ACK_OUT_CNT() \
109 atomic_inc(&bam_dmux_ack_out_cnt)
110
111#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
112 atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)
113
114#define DBG_INC_ACK_IN_CNT() \
115 atomic_inc(&bam_dmux_ack_in_cnt)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700116#else
117#define DBG(x...) do { } while (0)
118#define DBG_INC_READ_CNT(x...) do { } while (0)
119#define DBG_INC_WRITE_CNT(x...) do { } while (0)
120#define DBG_INC_WRITE_CPY(x...) do { } while (0)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700121#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
Eric Holmberg6074aba2012-01-18 17:59:44 -0700122#define DBG_INC_TX_STALL_CNT() do { } while (0)
Eric Holmberg1f1255d2012-02-22 13:37:21 -0700123#define DBG_INC_ACK_OUT_CNT() do { } while (0)
124#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
125 do { } while (0)
126#define DBG_INC_ACK_IN_CNT() do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127#endif
128
129struct bam_ch_info {
130 uint32_t status;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600131 void (*notify)(void *, int, unsigned long);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132 void *priv;
133 spinlock_t lock;
Jeff Hugo7960abd2011-08-02 15:39:38 -0600134 struct platform_device *pdev;
135 char name[BAM_DMUX_CH_NAME_MAX_LEN];
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700136 int num_tx_pkts;
137 int use_wm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138};
139
140struct tx_pkt_info {
141 struct sk_buff *skb;
142 dma_addr_t dma_address;
143 char is_cmd;
144 uint32_t len;
145 struct work_struct work;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600146 struct list_head list_node;
Eric Holmberg878923a2012-01-10 14:28:19 -0700147 unsigned ts_sec;
148 unsigned long ts_nsec;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149};
150
151struct rx_pkt_info {
152 struct sk_buff *skb;
153 dma_addr_t dma_address;
154 struct work_struct work;
Jeff Hugo949080a2011-08-30 11:58:56 -0600155 struct list_head list_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156};
157
158#define A2_NUM_PIPES 6
159#define A2_SUMMING_THRESHOLD 4096
160#define A2_DEFAULT_DESCRIPTORS 32
161#define A2_PHYS_BASE 0x124C2000
162#define A2_PHYS_SIZE 0x2000
163#define BUFFER_SIZE 2048
164#define NUM_BUFFERS 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700165static struct sps_bam_props a2_props;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600166static u32 a2_device_handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700167static struct sps_pipe *bam_tx_pipe;
168static struct sps_pipe *bam_rx_pipe;
169static struct sps_connect tx_connection;
170static struct sps_connect rx_connection;
171static struct sps_mem_buffer tx_desc_mem_buf;
172static struct sps_mem_buffer rx_desc_mem_buf;
173static struct sps_register_event tx_register_event;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600174static struct sps_register_event rx_register_event;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175
176static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
177static int bam_mux_initialized;
178
Jeff Hugo949080a2011-08-30 11:58:56 -0600179static int polling_mode;
180
181static LIST_HEAD(bam_rx_pool);
Jeff Hugoc9749932011-11-02 17:50:40 -0600182static DEFINE_MUTEX(bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700183static int bam_rx_pool_len;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600184static LIST_HEAD(bam_tx_pool);
Jeff Hugoc9749932011-11-02 17:50:40 -0600185static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600186
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700187struct bam_mux_hdr {
188 uint16_t magic_num;
189 uint8_t reserved;
190 uint8_t cmd;
191 uint8_t pad_len;
192 uint8_t ch_id;
193 uint16_t pkt_len;
194};
195
Jeff Hugod98b1082011-10-24 10:30:23 -0600196static void notify_all(int event, unsigned long data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197static void bam_mux_write_done(struct work_struct *work);
198static void handle_bam_mux_cmd(struct work_struct *work);
Jeff Hugo949080a2011-08-30 11:58:56 -0600199static void rx_timer_work_func(struct work_struct *work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700200
Jeff Hugo949080a2011-08-30 11:58:56 -0600201static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202
203static struct workqueue_struct *bam_mux_rx_workqueue;
204static struct workqueue_struct *bam_mux_tx_workqueue;
205
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600206/* A2 power collapse */
207#define UL_TIMEOUT_DELAY 1000 /* in ms */
Jeff Hugo0b13a352012-03-17 23:18:30 -0600208#define ENABLE_DISCONNECT_ACK 0x1
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600209static void toggle_apps_ack(void);
210static void reconnect_to_bam(void);
211static void disconnect_to_bam(void);
212static void ul_wakeup(void);
213static void ul_timeout(struct work_struct *work);
214static void vote_dfab(void);
215static void unvote_dfab(void);
Jeff Hugod98b1082011-10-24 10:30:23 -0600216static void kickoff_ul_wakeup_func(struct work_struct *work);
Eric Holmberg006057d2012-01-11 10:10:42 -0700217static void grab_wakelock(void);
218static void release_wakelock(void);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600219
220static int bam_is_connected;
221static DEFINE_MUTEX(wakeup_lock);
222static struct completion ul_wakeup_ack_completion;
223static struct completion bam_connection_completion;
224static struct delayed_work ul_timeout_work;
225static int ul_packet_written;
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700226static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
Stephen Boyd69d35e32012-02-14 15:33:30 -0800227static struct clk *dfab_clk, *xo_clk;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600228static DEFINE_RWLOCK(ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600229static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600230static int bam_connection_is_active;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -0700231static int wait_for_ack;
Jeff Hugoae3a85e2011-12-02 17:10:18 -0700232static struct wake_lock bam_wakelock;
Eric Holmberg006057d2012-01-11 10:10:42 -0700233static int a2_pc_disabled;
234static DEFINE_MUTEX(dfab_status_lock);
235static int dfab_is_on;
236static int wait_for_dfab;
237static struct completion dfab_unvote_completion;
238static DEFINE_SPINLOCK(wakelock_reference_lock);
239static int wakelock_reference_count;
Jeff Hugo583a6da2012-02-03 11:37:30 -0700240static int a2_pc_disabled_wakelock_skipped;
Jeff Hugo0b13a352012-03-17 23:18:30 -0600241static int disconnect_ack;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600242/* End A2 power collapse */
243
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600244/* subsystem restart */
245static int restart_notifier_cb(struct notifier_block *this,
246 unsigned long code,
247 void *data);
248
249static struct notifier_block restart_notifier = {
250 .notifier_call = restart_notifier_cb,
251};
252static int in_global_reset;
253/* end subsystem restart */
254
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define bam_ch_is_open(x) \
256 (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))
257
258#define bam_ch_is_local_open(x) \
259 (bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)
260
261#define bam_ch_is_remote_open(x) \
262 (bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)
263
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600264#define bam_ch_is_in_reset(x) \
265 (bam_ch[(x)].status & BAM_CH_IN_RESET)
266
Eric Holmberg878923a2012-01-10 14:28:19 -0700267#define LOG_MESSAGE_MAX_SIZE 80
268struct kfifo bam_dmux_state_log;
269static uint32_t bam_dmux_state_logging_disabled;
270static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
271static int bam_dmux_uplink_vote;
272static int bam_dmux_power_state;
273
274
275#define DMUX_LOG_KERR(fmt...) \
276do { \
277 bam_dmux_log(fmt); \
278 pr_err(fmt); \
279} while (0)
280
281/**
282 * Log a state change along with a small message.
283 *
 284 * Complete size of the message is limited to LOG_MESSAGE_MAX_SIZE bytes.
285 */
286static void bam_dmux_log(const char *fmt, ...)
287{
288 char buff[LOG_MESSAGE_MAX_SIZE];
289 unsigned long flags;
290 va_list arg_list;
291 unsigned long long t_now;
292 unsigned long nanosec_rem;
293 int len = 0;
294
295 if (bam_dmux_state_logging_disabled)
296 return;
297
298 t_now = sched_clock();
299 nanosec_rem = do_div(t_now, 1000000000U);
300
301 /*
302 * States
Eric Holmberg006057d2012-01-11 10:10:42 -0700303 * D: 1 = Power collapse disabled
Eric Holmberg878923a2012-01-10 14:28:19 -0700304 * R: 1 = in global reset
305 * P: 1 = BAM is powered up
306 * A: 1 = BAM initialized and ready for data
307 *
308 * V: 1 = Uplink vote for power
309 * U: 1 = Uplink active
310 * W: 1 = Uplink Wait-for-ack
311 * A: 1 = Uplink ACK received
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700312 * #: >=1 On-demand uplink vote
Jeff Hugo0b13a352012-03-17 23:18:30 -0600313 * D: 1 = Disconnect ACK active
Eric Holmberg878923a2012-01-10 14:28:19 -0700314 */
315 len += scnprintf(buff, sizeof(buff),
Jeff Hugo0b13a352012-03-17 23:18:30 -0600316 "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
Eric Holmberg878923a2012-01-10 14:28:19 -0700317 (unsigned)t_now, nanosec_rem,
Eric Holmberg006057d2012-01-11 10:10:42 -0700318 a2_pc_disabled ? 'D' : 'd',
Eric Holmberg878923a2012-01-10 14:28:19 -0700319 in_global_reset ? 'R' : 'r',
320 bam_dmux_power_state ? 'P' : 'p',
321 bam_connection_is_active ? 'A' : 'a',
322 bam_dmux_uplink_vote ? 'V' : 'v',
323 bam_is_connected ? 'U' : 'u',
324 wait_for_ack ? 'W' : 'w',
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700325 ul_wakeup_ack_completion.done ? 'A' : 'a',
Jeff Hugo0b13a352012-03-17 23:18:30 -0600326 atomic_read(&ul_ondemand_vote),
327 disconnect_ack ? 'D' : 'd'
Eric Holmberg878923a2012-01-10 14:28:19 -0700328 );
329
330 va_start(arg_list, fmt);
331 len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
332 va_end(arg_list);
333 memset(buff + len, 0x0, sizeof(buff) - len);
334
335 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
336 if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
337 char junk[LOG_MESSAGE_MAX_SIZE];
338 int ret;
339
340 ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
341 if (ret != LOG_MESSAGE_MAX_SIZE) {
342 pr_err("%s: unable to empty log %d\n", __func__, ret);
343 spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
344 flags);
345 return;
346 }
347 }
348 kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
349 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
350}
351
352static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
353{
354 unsigned long long t_now;
355
356 t_now = sched_clock();
357 pkt->ts_nsec = do_div(t_now, 1000000000U);
358 pkt->ts_sec = (unsigned)t_now;
359}
360
361static inline void verify_tx_queue_is_empty(const char *func)
362{
363 unsigned long flags;
364 struct tx_pkt_info *info;
365 int reported = 0;
366
367 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
368 list_for_each_entry(info, &bam_tx_pool, list_node) {
369 if (!reported) {
Eric Holmberg454d9da2012-01-12 09:37:14 -0700370 bam_dmux_log("%s: tx pool not empty\n", func);
371 if (!in_global_reset)
372 pr_err("%s: tx pool not empty\n", func);
Eric Holmberg878923a2012-01-10 14:28:19 -0700373 reported = 1;
374 }
Eric Holmberg454d9da2012-01-12 09:37:14 -0700375 bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
376 &info->list_node, info->ts_sec, info->ts_nsec);
377 if (!in_global_reset)
378 pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
379 &info->list_node, info->ts_sec, info->ts_nsec);
Eric Holmberg878923a2012-01-10 14:28:19 -0700380 }
381 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
382}
383
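/*
 * Refill the RX buffer pool up to NUM_BUFFERS entries: allocate an
 * rx_pkt_info and skb per buffer, map the buffer for DMA and submit it to
 * the BAM RX pipe.  If allocation or submission fails while the pool is
 * empty, the BAM connection is marked as being in global reset.
 */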
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384static void queue_rx(void)
385{
386 void *ptr;
387 struct rx_pkt_info *info;
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700388 int ret;
389 int rx_len_cached;
Jeff Hugo949080a2011-08-30 11:58:56 -0600390
Jeff Hugoc9749932011-11-02 17:50:40 -0600391 mutex_lock(&bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700392 rx_len_cached = bam_rx_pool_len;
Jeff Hugoc9749932011-11-02 17:50:40 -0600393 mutex_unlock(&bam_rx_pool_mutexlock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600394
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700395 while (rx_len_cached < NUM_BUFFERS) {
396 if (in_global_reset)
397 goto fail;
398
399 info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
400 if (!info) {
401 pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
402 goto fail;
403 }
404
405 INIT_WORK(&info->work, handle_bam_mux_cmd);
406
407 info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
408 if (info->skb == NULL) {
409 DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
410 goto fail_info;
411 }
412 ptr = skb_put(info->skb, BUFFER_SIZE);
413
414 info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
415 DMA_FROM_DEVICE);
416 if (info->dma_address == 0 || info->dma_address == ~0) {
417 DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
418 __func__, (void *)info->dma_address, ptr);
419 goto fail_skb;
420 }
421
422 mutex_lock(&bam_rx_pool_mutexlock);
423 list_add_tail(&info->list_node, &bam_rx_pool);
424 rx_len_cached = ++bam_rx_pool_len;
425 mutex_unlock(&bam_rx_pool_mutexlock);
426
427 ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
428 BUFFER_SIZE, info,
429 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
430
431 if (ret) {
432 DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
433 __func__, ret);
434 goto fail_transfer;
435 }
436 }
437 return;
438
439fail_transfer:
440 mutex_lock(&bam_rx_pool_mutexlock);
441 list_del(&info->list_node);
442 --bam_rx_pool_len;
443 rx_len_cached = bam_rx_pool_len;
444 mutex_unlock(&bam_rx_pool_mutexlock);
445
446 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
447 DMA_FROM_DEVICE);
448
449fail_skb:
450 dev_kfree_skb_any(info->skb);
451
452fail_info:
453 kfree(info);
454
455fail:
456 if (rx_len_cached == 0) {
457 DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
458 in_global_reset = 1;
459 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700460}
461
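/*
 * Strip the mux header from a received skb and hand the payload to the
 * channel's notify callback as BAM_DMUX_RECEIVE, or free it if no client
 * is registered, then queue a replacement RX buffer.
 */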
462static void bam_mux_process_data(struct sk_buff *rx_skb)
463{
464 unsigned long flags;
465 struct bam_mux_hdr *rx_hdr;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600466 unsigned long event_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467
468 rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
469
470 rx_skb->data = (unsigned char *)(rx_hdr + 1);
471 rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
472 rx_skb->len = rx_hdr->pkt_len;
Jeff Hugoee88f672011-10-04 17:14:52 -0600473 rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700474
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600475 event_data = (unsigned long)(rx_skb);
476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600478 if (bam_ch[rx_hdr->ch_id].notify)
479 bam_ch[rx_hdr->ch_id].notify(
480 bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
481 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700482 else
483 dev_kfree_skb_any(rx_skb);
484 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
485
486 queue_rx();
487}
488
Eric Holmberg006057d2012-01-11 10:10:42 -0700489static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
490{
491 unsigned long flags;
492 int ret;
493
494 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
495 bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
496 bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
497 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
498 queue_rx();
499 ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
500 if (ret)
501 pr_err("%s: platform_device_add() error: %d\n",
502 __func__, ret);
503}
504
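/*
 * RX work function: unmap the completed buffer, validate the mux header
 * (magic number and logical channel id) and dispatch on the command type -
 * DATA, OPEN, OPEN_NO_A2_PC or CLOSE.  Invalid packets are logged and
 * dropped, and the RX pool is topped up in every case.
 */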
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700505static void handle_bam_mux_cmd(struct work_struct *work)
506{
507 unsigned long flags;
508 struct bam_mux_hdr *rx_hdr;
509 struct rx_pkt_info *info;
510 struct sk_buff *rx_skb;
511
512 info = container_of(work, struct rx_pkt_info, work);
513 rx_skb = info->skb;
Jeff Hugo949080a2011-08-30 11:58:56 -0600514 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700515 kfree(info);
516
517 rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
518
519 DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
520 DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
521 rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
522 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
523 if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700524 DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
525 " reserved %d cmd %d"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700526 " pad %d ch %d len %d\n", __func__,
527 rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
528 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
529 dev_kfree_skb_any(rx_skb);
530 queue_rx();
531 return;
532 }
Eric Holmberg9ff40a52011-11-17 19:17:00 -0700533
534 if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700535 DMUX_LOG_KERR("%s: dropping invalid LCID %d"
536 " reserved %d cmd %d"
Eric Holmberg9ff40a52011-11-17 19:17:00 -0700537 " pad %d ch %d len %d\n", __func__,
538 rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
539 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
540 dev_kfree_skb_any(rx_skb);
541 queue_rx();
542 return;
543 }
544
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700545 switch (rx_hdr->cmd) {
546 case BAM_MUX_HDR_CMD_DATA:
547 DBG_INC_READ_CNT(rx_hdr->pkt_len);
548 bam_mux_process_data(rx_skb);
549 break;
550 case BAM_MUX_HDR_CMD_OPEN:
Eric Holmberg006057d2012-01-11 10:10:42 -0700551 bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -0700552 rx_hdr->ch_id);
Eric Holmberg006057d2012-01-11 10:10:42 -0700553 handle_bam_mux_cmd_open(rx_hdr);
Jeff Hugo0b13a352012-03-17 23:18:30 -0600554 if (rx_hdr->reserved & ENABLE_DISCONNECT_ACK) {
 555 bam_dmux_log("%s: activating disconnect ack\n", __func__);
556 disconnect_ack = 1;
557 }
Eric Holmberg006057d2012-01-11 10:10:42 -0700558 dev_kfree_skb_any(rx_skb);
559 break;
560 case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
561 bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
562 rx_hdr->ch_id);
563
564 if (!a2_pc_disabled) {
565 a2_pc_disabled = 1;
Jeff Hugo322179f2012-02-29 10:52:34 -0700566 ul_wakeup();
Eric Holmberg006057d2012-01-11 10:10:42 -0700567 }
568
569 handle_bam_mux_cmd_open(rx_hdr);
Eric Holmberge779dba2011-11-04 18:22:01 -0600570 dev_kfree_skb_any(rx_skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 break;
572 case BAM_MUX_HDR_CMD_CLOSE:
573 /* probably should drop pending write */
Eric Holmberg878923a2012-01-10 14:28:19 -0700574 bam_dmux_log("%s: closing cid %d\n", __func__,
575 rx_hdr->ch_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
577 bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
578 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700579 queue_rx();
Jeff Hugo7960abd2011-08-02 15:39:38 -0600580 platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
581 bam_ch[rx_hdr->ch_id].pdev =
582 platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
583 if (!bam_ch[rx_hdr->ch_id].pdev)
584 pr_err("%s: platform_device_alloc failed\n", __func__);
Eric Holmberge779dba2011-11-04 18:22:01 -0600585 dev_kfree_skb_any(rx_skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586 break;
587 default:
Eric Holmberg878923a2012-01-10 14:28:19 -0700588 DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
589 " reserved %d cmd %d pad %d ch %d len %d\n",
590 __func__, rx_hdr->magic_num, rx_hdr->reserved,
591 rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
592 rx_hdr->pkt_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700593 dev_kfree_skb_any(rx_skb);
594 queue_rx();
595 return;
596 }
597}
598
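/*
 * Queue a command buffer (an open/close header) for transmit: map it for
 * DMA, add it to bam_tx_pool and submit it to the BAM TX pipe.  The buffer
 * is freed by bam_mux_write_done() once the transfer completes.
 */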
599static int bam_mux_write_cmd(void *data, uint32_t len)
600{
601 int rc;
602 struct tx_pkt_info *pkt;
603 dma_addr_t dma_address;
Jeff Hugo626303bf2011-11-21 11:43:28 -0700604 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605
Eric Holmbergd83cd2b2011-11-04 15:54:17 -0600606 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 if (pkt == NULL) {
608 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
609 rc = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700610 return rc;
611 }
612
613 dma_address = dma_map_single(NULL, data, len,
614 DMA_TO_DEVICE);
615 if (!dma_address) {
616 pr_err("%s: dma_map_single() failed\n", __func__);
Jeff Hugo96cb7482011-12-07 13:28:31 -0700617 kfree(pkt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700618 rc = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619 return rc;
620 }
621 pkt->skb = (struct sk_buff *)(data);
622 pkt->len = len;
623 pkt->dma_address = dma_address;
624 pkt->is_cmd = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -0700625 set_tx_timestamp(pkt);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600626 INIT_WORK(&pkt->work, bam_mux_write_done);
Jeff Hugo626303bf2011-11-21 11:43:28 -0700627 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600628 list_add_tail(&pkt->list_node, &bam_tx_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629 rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
630 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600631 if (rc) {
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700632 DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
633 __func__, rc);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600634 list_del(&pkt->list_node);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700635 DBG_INC_TX_SPS_FAILURE_CNT();
Jeff Hugo626303bf2011-11-21 11:43:28 -0700636 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700637 dma_unmap_single(NULL, pkt->dma_address,
638 pkt->len,
639 DMA_TO_DEVICE);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600640 kfree(pkt);
Jeff Hugobb6da952012-01-16 15:02:42 -0700641 } else {
642 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600643 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600645 ul_packet_written = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646 return rc;
647}
648
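/*
 * TX completion work function: transfers complete in order, so the finished
 * packet must be the head of bam_tx_pool; anything else indicates list
 * corruption and triggers a BUG().  Command buffers are simply freed, data
 * skbs are returned to the client via the BAM_DMUX_WRITE_DONE notification
 * (or freed if the channel has no callback).
 */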
649static void bam_mux_write_done(struct work_struct *work)
650{
651 struct sk_buff *skb;
652 struct bam_mux_hdr *hdr;
653 struct tx_pkt_info *info;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700654 struct tx_pkt_info *info_expected;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600655 unsigned long event_data;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700656 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600658 if (in_global_reset)
659 return;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700660
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700661 info = container_of(work, struct tx_pkt_info, work);
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700662
663 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
664 info_expected = list_first_entry(&bam_tx_pool,
665 struct tx_pkt_info, list_node);
666 if (unlikely(info != info_expected)) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700667 struct tx_pkt_info *errant_pkt;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700668
Eric Holmberg878923a2012-01-10 14:28:19 -0700669 DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
670 " list_node=%p, ts=%u.%09lu\n",
671 __func__, bam_tx_pool.next, &info->list_node,
672 info->ts_sec, info->ts_nsec
673 );
674
675 list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
676 DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
677 &errant_pkt->list_node, errant_pkt->ts_sec,
678 errant_pkt->ts_nsec);
679
680 }
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700681 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
682 BUG();
683 }
684 list_del(&info->list_node);
685 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
686
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600687 if (info->is_cmd) {
688 kfree(info->skb);
689 kfree(info);
690 return;
691 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700692 skb = info->skb;
693 kfree(info);
694 hdr = (struct bam_mux_hdr *)skb->data;
Eric Holmberg9fdef262012-02-14 11:46:05 -0700695 DBG_INC_WRITE_CNT(skb->len);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600696 event_data = (unsigned long)(skb);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700697 spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
698 bam_ch[hdr->ch_id].num_tx_pkts--;
699 spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600700 if (bam_ch[hdr->ch_id].notify)
701 bam_ch[hdr->ch_id].notify(
702 bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
703 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704 else
705 dev_kfree_skb_any(skb);
706}
707
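/*
 * Queue an skb for transmit on a logical channel: enforce the high
 * watermark when use_wm is set, wake the uplink if necessary, prepend a
 * bam_mux_hdr (copying the skb if there is no tailroom for padding) and
 * submit the buffer to the BAM TX pipe.
 */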
708int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
709{
710 int rc = 0;
711 struct bam_mux_hdr *hdr;
712 unsigned long flags;
713 struct sk_buff *new_skb = NULL;
714 dma_addr_t dma_address;
715 struct tx_pkt_info *pkt;
716
717 if (id >= BAM_DMUX_NUM_CHANNELS)
718 return -EINVAL;
719 if (!skb)
720 return -EINVAL;
721 if (!bam_mux_initialized)
722 return -ENODEV;
723
724 DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
725 spin_lock_irqsave(&bam_ch[id].lock, flags);
726 if (!bam_ch_is_open(id)) {
727 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
728 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
729 return -ENODEV;
730 }
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700731
732 if (bam_ch[id].use_wm &&
733 (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
734 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
735 pr_err("%s: watermark exceeded: %d\n", __func__, id);
736 return -EAGAIN;
737 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700738 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
739
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600740 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600741 if (!bam_is_connected) {
742 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600743 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700744 if (unlikely(in_global_reset == 1))
745 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600746 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600747 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600748 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600749
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700750 /* if the skb does not have any tailroom for padding,
751 copy the skb into a new expanded skb */
752 if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
 753 /* revisit: dev_alloc_skb and memcpy would probably be more efficient */
754 new_skb = skb_copy_expand(skb, skb_headroom(skb),
755 4 - (skb->len & 0x3), GFP_ATOMIC);
756 if (new_skb == NULL) {
757 pr_err("%s: cannot allocate skb\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600758 goto write_fail;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700759 }
760 dev_kfree_skb_any(skb);
761 skb = new_skb;
762 DBG_INC_WRITE_CPY(skb->len);
763 }
764
765 hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));
766
767 /* caller should allocate for hdr and padding
768 hdr is fine, padding is tricky */
769 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
770 hdr->cmd = BAM_MUX_HDR_CMD_DATA;
771 hdr->reserved = 0;
772 hdr->ch_id = id;
773 hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
774 if (skb->len & 0x3)
775 skb_put(skb, 4 - (skb->len & 0x3));
776
777 hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);
778
779 DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
780 __func__, skb->data, skb->tail, skb->len,
781 hdr->pkt_len, hdr->pad_len);
782
783 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
784 if (pkt == NULL) {
785 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600786 goto write_fail2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700787 }
788
789 dma_address = dma_map_single(NULL, skb->data, skb->len,
790 DMA_TO_DEVICE);
791 if (!dma_address) {
792 pr_err("%s: dma_map_single() failed\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600793 goto write_fail3;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794 }
795 pkt->skb = skb;
796 pkt->dma_address = dma_address;
797 pkt->is_cmd = 0;
Eric Holmberg878923a2012-01-10 14:28:19 -0700798 set_tx_timestamp(pkt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700799 INIT_WORK(&pkt->work, bam_mux_write_done);
Jeff Hugo626303bf2011-11-21 11:43:28 -0700800 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600801 list_add_tail(&pkt->list_node, &bam_tx_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700802 rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
803 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600804 if (rc) {
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700805 DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
806 __func__, rc);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600807 list_del(&pkt->list_node);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700808 DBG_INC_TX_SPS_FAILURE_CNT();
Jeff Hugo626303bf2011-11-21 11:43:28 -0700809 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700810 dma_unmap_single(NULL, pkt->dma_address,
811 pkt->skb->len, DMA_TO_DEVICE);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600812 kfree(pkt);
Jeff Hugo872bd062011-11-15 17:47:21 -0700813 if (new_skb)
814 dev_kfree_skb_any(new_skb);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700815 } else {
Jeff Hugobb6da952012-01-16 15:02:42 -0700816 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700817 spin_lock_irqsave(&bam_ch[id].lock, flags);
818 bam_ch[id].num_tx_pkts++;
819 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600820 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600821 ul_packet_written = 1;
822 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700823 return rc;
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600824
825write_fail3:
826 kfree(pkt);
827write_fail2:
828 if (new_skb)
829 dev_kfree_skb_any(new_skb);
830write_fail:
831 read_unlock(&ul_wakeup_lock);
832 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833}
834
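/*
 * Open a logical channel from the apps side: record the client callback,
 * wake the uplink if needed and send a BAM_MUX_HDR_CMD_OPEN command to the
 * remote side.  The remote end must already have opened the channel.
 */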
835int msm_bam_dmux_open(uint32_t id, void *priv,
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600836 void (*notify)(void *, int, unsigned long))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700837{
838 struct bam_mux_hdr *hdr;
839 unsigned long flags;
840 int rc = 0;
841
842 DBG("%s: opening ch %d\n", __func__, id);
Eric Holmberg5d775432011-11-09 10:23:35 -0700843 if (!bam_mux_initialized) {
 844 DBG("%s: not initialized\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 return -ENODEV;
Eric Holmberg5d775432011-11-09 10:23:35 -0700846 }
847 if (id >= BAM_DMUX_NUM_CHANNELS) {
848 pr_err("%s: invalid channel id %d\n", __func__, id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700850 }
851 if (notify == NULL) {
852 pr_err("%s: notify function is NULL\n", __func__);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600853 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700854 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855
856 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
857 if (hdr == NULL) {
858 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
859 return -ENOMEM;
860 }
861 spin_lock_irqsave(&bam_ch[id].lock, flags);
862 if (bam_ch_is_open(id)) {
863 DBG("%s: Already opened %d\n", __func__, id);
864 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
865 kfree(hdr);
866 goto open_done;
867 }
868 if (!bam_ch_is_remote_open(id)) {
869 DBG("%s: Remote not open; ch: %d\n", __func__, id);
870 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
871 kfree(hdr);
Eric Holmberg5d775432011-11-09 10:23:35 -0700872 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700873 }
874
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600875 bam_ch[id].notify = notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876 bam_ch[id].priv = priv;
877 bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700878 bam_ch[id].num_tx_pkts = 0;
879 bam_ch[id].use_wm = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
881
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600882 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600883 if (!bam_is_connected) {
884 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600885 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700886 if (unlikely(in_global_reset == 1))
887 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600888 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600889 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600890 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
893 hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
894 hdr->reserved = 0;
895 hdr->ch_id = id;
896 hdr->pkt_len = 0;
897 hdr->pad_len = 0;
898
899 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600900 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901
902open_done:
903 DBG("%s: opened ch %d\n", __func__, id);
904 return rc;
905}
906
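/*
 * Close a logical channel: clear the client callback and local-open state,
 * then send a BAM_MUX_HDR_CMD_CLOSE command to the remote side, unless the
 * channel is being torn down by a subsystem restart (in which case only the
 * reset flag is cleared).
 */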
907int msm_bam_dmux_close(uint32_t id)
908{
909 struct bam_mux_hdr *hdr;
910 unsigned long flags;
911 int rc;
912
913 if (id >= BAM_DMUX_NUM_CHANNELS)
914 return -EINVAL;
915 DBG("%s: closing ch %d\n", __func__, id);
916 if (!bam_mux_initialized)
917 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700918
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600919 read_lock(&ul_wakeup_lock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600920 if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
Jeff Hugo061ce672011-10-21 17:15:32 -0600921 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600922 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700923 if (unlikely(in_global_reset == 1))
924 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600925 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600926 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600927 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600928
Jeff Hugo061ce672011-10-21 17:15:32 -0600929 spin_lock_irqsave(&bam_ch[id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600930 bam_ch[id].notify = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700931 bam_ch[id].priv = NULL;
932 bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
933 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
934
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600935 if (bam_ch_is_in_reset(id)) {
936 read_unlock(&ul_wakeup_lock);
937 bam_ch[id].status &= ~BAM_CH_IN_RESET;
938 return 0;
939 }
940
Jeff Hugobb5802f2011-11-02 17:10:29 -0600941 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700942 if (hdr == NULL) {
943 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600944 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700945 return -ENOMEM;
946 }
947 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
948 hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
949 hdr->reserved = 0;
950 hdr->ch_id = id;
951 hdr->pkt_len = 0;
952 hdr->pad_len = 0;
953
954 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600955 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956
957 DBG("%s: closed ch %d\n", __func__, id);
958 return rc;
959}
960
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700961int msm_bam_dmux_is_ch_full(uint32_t id)
962{
963 unsigned long flags;
964 int ret;
965
966 if (id >= BAM_DMUX_NUM_CHANNELS)
967 return -EINVAL;
968
969 spin_lock_irqsave(&bam_ch[id].lock, flags);
970 bam_ch[id].use_wm = 1;
971 ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
972 DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
973 id, bam_ch[id].num_tx_pkts, ret);
974 if (!bam_ch_is_local_open(id)) {
975 ret = -ENODEV;
976 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
977 }
978 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
979
980 return ret;
981}
982
983int msm_bam_dmux_is_ch_low(uint32_t id)
984{
Eric Holmberged3ca0a2012-04-09 15:44:58 -0600985 unsigned long flags;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700986 int ret;
987
988 if (id >= BAM_DMUX_NUM_CHANNELS)
989 return -EINVAL;
990
Eric Holmberged3ca0a2012-04-09 15:44:58 -0600991 spin_lock_irqsave(&bam_ch[id].lock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700992 bam_ch[id].use_wm = 1;
993 ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
994 DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
995 id, bam_ch[id].num_tx_pkts, ret);
996 if (!bam_ch_is_local_open(id)) {
997 ret = -ENODEV;
998 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
999 }
Eric Holmberged3ca0a2012-04-09 15:44:58 -06001000 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001001
1002 return ret;
1003}
1004
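/*
 * Leave polling mode: re-register for EOT interrupts on the RX pipe,
 * release the wakelock and drain any descriptors that completed while the
 * interrupt was still disabled.  On failure, polling continues via
 * rx_timer_work.
 */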
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001005static void rx_switch_to_interrupt_mode(void)
1006{
1007 struct sps_connect cur_rx_conn;
1008 struct sps_iovec iov;
1009 struct rx_pkt_info *info;
1010 int ret;
1011
1012 /*
1013 * Attempt to enable interrupts - if this fails,
1014 * continue polling and we will retry later.
1015 */
1016 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
1017 if (ret) {
1018 pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
1019 goto fail;
1020 }
1021
1022 rx_register_event.options = SPS_O_EOT;
1023 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
1024 if (ret) {
1025 pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
1026 goto fail;
1027 }
1028
1029 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
1030 SPS_O_EOT | SPS_O_ACK_TRANSFERS;
1031 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
1032 if (ret) {
1033 pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
1034 goto fail;
1035 }
1036 polling_mode = 0;
Eric Holmberg006057d2012-01-11 10:10:42 -07001037 release_wakelock();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001038
1039 /* handle any rx packets before interrupt was enabled */
1040 while (bam_connection_is_active && !polling_mode) {
1041 ret = sps_get_iovec(bam_rx_pipe, &iov);
1042 if (ret) {
1043 pr_err("%s: sps_get_iovec failed %d\n",
1044 __func__, ret);
1045 break;
1046 }
1047 if (iov.addr == 0)
1048 break;
1049
1050 mutex_lock(&bam_rx_pool_mutexlock);
1051 if (unlikely(list_empty(&bam_rx_pool))) {
1052 mutex_unlock(&bam_rx_pool_mutexlock);
1053 continue;
1054 }
1055 info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
1056 list_node);
1057 list_del(&info->list_node);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001058 --bam_rx_pool_len;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001059 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001060 if (info->dma_address != iov.addr)
1061 DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
1062 __func__,
1063 (void *)info->dma_address, (void *)iov.addr);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001064 handle_bam_mux_cmd(&info->work);
1065 }
1066 return;
1067
1068fail:
1069 pr_err("%s: reverting to polling\n", __func__);
1070 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
1071}
1072
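/*
 * Polling-mode RX loop: repeatedly pull completed descriptors from the RX
 * pipe and process them, sleeping POLLING_MIN_SLEEP..POLLING_MAX_SLEEP
 * between passes.  After POLLING_INACTIVITY idle cycles the pipe is
 * switched back to interrupt mode.
 */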
Jeff Hugo949080a2011-08-30 11:58:56 -06001073static void rx_timer_work_func(struct work_struct *work)
1074{
1075 struct sps_iovec iov;
Jeff Hugo949080a2011-08-30 11:58:56 -06001076 struct rx_pkt_info *info;
1077 int inactive_cycles = 0;
1078 int ret;
Jeff Hugo949080a2011-08-30 11:58:56 -06001079
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001080 while (bam_connection_is_active) { /* timer loop */
Jeff Hugo949080a2011-08-30 11:58:56 -06001081 ++inactive_cycles;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001082 while (bam_connection_is_active) { /* deplete queue loop */
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001083 if (in_global_reset)
1084 return;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001085
1086 ret = sps_get_iovec(bam_rx_pipe, &iov);
1087 if (ret) {
1088 pr_err("%s: sps_get_iovec failed %d\n",
1089 __func__, ret);
1090 break;
1091 }
Jeff Hugo949080a2011-08-30 11:58:56 -06001092 if (iov.addr == 0)
1093 break;
1094 inactive_cycles = 0;
Jeff Hugoc9749932011-11-02 17:50:40 -06001095 mutex_lock(&bam_rx_pool_mutexlock);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001096 if (unlikely(list_empty(&bam_rx_pool))) {
1097 mutex_unlock(&bam_rx_pool_mutexlock);
1098 continue;
1099 }
1100 info = list_first_entry(&bam_rx_pool,
1101 struct rx_pkt_info, list_node);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001102 --bam_rx_pool_len;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001103 list_del(&info->list_node);
Jeff Hugoc9749932011-11-02 17:50:40 -06001104 mutex_unlock(&bam_rx_pool_mutexlock);
Jeff Hugo949080a2011-08-30 11:58:56 -06001105 handle_bam_mux_cmd(&info->work);
1106 }
1107
1108 if (inactive_cycles == POLLING_INACTIVITY) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001109 rx_switch_to_interrupt_mode();
1110 break;
Jeff Hugo949080a2011-08-30 11:58:56 -06001111 }
1112
1113 usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
1114 }
1115}
1116
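/*
 * TX pipe event callback: on EOT, unmap the completed buffer and queue
 * bam_mux_write_done() on the TX workqueue.
 */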
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001117static void bam_mux_tx_notify(struct sps_event_notify *notify)
1118{
1119 struct tx_pkt_info *pkt;
1120
1121 DBG("%s: event %d notified\n", __func__, notify->event_id);
1122
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001123 if (in_global_reset)
1124 return;
1125
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001126 switch (notify->event_id) {
1127 case SPS_EVENT_EOT:
1128 pkt = notify->data.transfer.user;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001129 if (!pkt->is_cmd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001130 dma_unmap_single(NULL, pkt->dma_address,
1131 pkt->skb->len,
1132 DMA_TO_DEVICE);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001133 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001134 dma_unmap_single(NULL, pkt->dma_address,
1135 pkt->len,
1136 DMA_TO_DEVICE);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001137 queue_work(bam_mux_tx_workqueue, &pkt->work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001138 break;
1139 default:
 1140 pr_err("%s: received unexpected event id %d\n", __func__,
1141 notify->event_id);
1142 }
1143}
1144
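/*
 * RX pipe event callback: on EOT, switch the pipe into polling mode
 * (interrupts masked, SPS_O_POLL), grab the wakelock and kick off
 * rx_timer_work to drain the pipe.
 */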
Jeff Hugo33dbc002011-08-25 15:52:53 -06001145static void bam_mux_rx_notify(struct sps_event_notify *notify)
1146{
Jeff Hugo949080a2011-08-30 11:58:56 -06001147 int ret;
1148 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -06001149
1150 DBG("%s: event %d notified\n", __func__, notify->event_id);
1151
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001152 if (in_global_reset)
1153 return;
1154
Jeff Hugo33dbc002011-08-25 15:52:53 -06001155 switch (notify->event_id) {
1156 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -06001157 /* attempt to disable interrupts in this pipe */
1158 if (!polling_mode) {
1159 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
1160 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001161 pr_err("%s: sps_get_config() failed %d, interrupts"
1162 " not disabled\n", __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001163 break;
1164 }
Jeff Hugoa9d32ba2011-11-21 14:59:48 -07001165 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
Jeff Hugo949080a2011-08-30 11:58:56 -06001166 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
1167 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
1168 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001169 pr_err("%s: sps_set_config() failed %d, interrupts"
1170 " not disabled\n", __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001171 break;
1172 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001173 grab_wakelock();
Jeff Hugo949080a2011-08-30 11:58:56 -06001174 polling_mode = 1;
1175 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
1176 }
Jeff Hugo33dbc002011-08-25 15:52:53 -06001177 break;
1178 default:
 1179 pr_err("%s: received unexpected event id %d\n", __func__,
1180 notify->event_id);
1181 }
1182}
1183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001184#ifdef CONFIG_DEBUG_FS
1185
1186static int debug_tbl(char *buf, int max)
1187{
1188 int i = 0;
1189 int j;
1190
1191 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1192 i += scnprintf(buf + i, max - i,
1193 "ch%02d local open=%s remote open=%s\n",
1194 j, bam_ch_is_local_open(j) ? "Y" : "N",
1195 bam_ch_is_remote_open(j) ? "Y" : "N");
1196 }
1197
1198 return i;
1199}
1200
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001201static int debug_ul_pkt_cnt(char *buf, int max)
1202{
1203 struct list_head *p;
1204 unsigned long flags;
1205 int n = 0;
1206
1207 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1208 __list_for_each(p, &bam_tx_pool) {
1209 ++n;
1210 }
1211 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1212
1213 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1214}
1215
1216static int debug_stats(char *buf, int max)
1217{
1218 int i = 0;
1219
1220 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001221 "skb read cnt: %u\n"
1222 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001223 "skb copy cnt: %u\n"
1224 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001225 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001226 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001227 "rx queue len: %d\n"
1228 "a2 ack out cnt: %d\n"
1229 "a2 ack in cnt: %d\n"
1230 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001231 bam_dmux_read_cnt,
1232 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001233 bam_dmux_write_cpy_cnt,
1234 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001235 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001236 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001237 bam_rx_pool_len,
1238 atomic_read(&bam_dmux_ack_out_cnt),
1239 atomic_read(&bam_dmux_ack_in_cnt),
1240 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001241 );
1242
1243 return i;
1244}
1245
Eric Holmberg878923a2012-01-10 14:28:19 -07001246static int debug_log(char *buff, int max, loff_t *ppos)
1247{
1248 unsigned long flags;
1249 int i = 0;
1250
1251 if (bam_dmux_state_logging_disabled) {
1252 i += scnprintf(buff - i, max - i, "Logging disabled\n");
1253 return i;
1254 }
1255
1256 if (*ppos == 0) {
1257 i += scnprintf(buff - i, max - i,
1258 "<DMUX> timestamp FLAGS [Message]\n"
1259 "FLAGS:\n"
Eric Holmberg006057d2012-01-11 10:10:42 -07001260 "\tD: 1 = Power collapse disabled\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001261 "\tR: 1 = in global reset\n"
1262 "\tP: 1 = BAM is powered up\n"
1263 "\tA: 1 = BAM initialized and ready for data\n"
1264 "\n"
1265 "\tV: 1 = Uplink vote for power\n"
1266 "\tU: 1 = Uplink active\n"
1267 "\tW: 1 = Uplink Wait-for-ack\n"
1268 "\tA: 1 = Uplink ACK received\n"
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001269 "\t#: >=1 On-demand uplink vote\n"
Jeff Hugo0b13a352012-03-17 23:18:30 -06001270 "\tD: 1 = Disconnect ACK active\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001271 );
1272 buff += i;
1273 }
1274
1275 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
1276 while (kfifo_len(&bam_dmux_state_log)
1277 && (i + LOG_MESSAGE_MAX_SIZE) < max) {
1278 int k_len;
1279 k_len = kfifo_out(&bam_dmux_state_log,
1280 buff, LOG_MESSAGE_MAX_SIZE);
1281 if (k_len != LOG_MESSAGE_MAX_SIZE) {
1282 pr_err("%s: retrieve failure %d\n", __func__, k_len);
1283 break;
1284 }
1285
1286 /* keep non-null portion of string and add line break */
1287 k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
1288 buff += k_len;
1289 i += k_len;
1290 if (k_len && *(buff - 1) != '\n') {
1291 *buff++ = '\n';
1292 ++i;
1293 }
1294 }
1295 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
1296
1297 return i;
1298}
1299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300#define DEBUG_BUFMAX 4096
1301static char debug_buffer[DEBUG_BUFMAX];
1302
1303static ssize_t debug_read(struct file *file, char __user *buf,
1304 size_t count, loff_t *ppos)
1305{
1306 int (*fill)(char *buf, int max) = file->private_data;
1307 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1308 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1309}
1310
Eric Holmberg878923a2012-01-10 14:28:19 -07001311static ssize_t debug_read_multiple(struct file *file, char __user *buff,
1312 size_t count, loff_t *ppos)
1313{
1314 int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
1315 char *buffer;
1316 int bsize;
1317
1318 buffer = kmalloc(count, GFP_KERNEL);
1319 if (!buffer)
1320 return -ENOMEM;
1321
1322 bsize = util_func(buffer, count, ppos);
1323
1324 if (bsize >= 0) {
1325 if (copy_to_user(buff, buffer, bsize)) {
1326 kfree(buffer);
1327 return -EFAULT;
1328 }
1329 *ppos += bsize;
1330 }
1331 kfree(buffer);
1332 return bsize;
1333}
1334
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001335static int debug_open(struct inode *inode, struct file *file)
1336{
1337 file->private_data = inode->i_private;
1338 return 0;
1339}
1340
1341
1342static const struct file_operations debug_ops = {
1343 .read = debug_read,
1344 .open = debug_open,
1345};
1346
Eric Holmberg878923a2012-01-10 14:28:19 -07001347static const struct file_operations debug_ops_multiple = {
1348 .read = debug_read_multiple,
1349 .open = debug_open,
1350};
1351
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352static void debug_create(const char *name, mode_t mode,
1353 struct dentry *dent,
1354 int (*fill)(char *buf, int max))
1355{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001356 struct dentry *file;
1357
1358 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1359 if (IS_ERR(file))
1360 pr_err("%s: debugfs create failed %d\n", __func__,
1361 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001362}
1363
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001364static void debug_create_multiple(const char *name, mode_t mode,
1365 struct dentry *dent,
1366 int (*fill)(char *buf, int max, loff_t *ppos))
1367{
1368 struct dentry *file;
1369
1370 file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
1371 if (IS_ERR(file))
1372 pr_err("%s: debugfs create failed %d\n", __func__,
1373 (int)PTR_ERR(file));
1374}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001375#endif
1376
Jeff Hugod98b1082011-10-24 10:30:23 -06001377static void notify_all(int event, unsigned long data)
1378{
1379 int i;
1380
1381 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001382 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001383 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001384 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1385 __func__, i, event, data);
1386 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001387 }
1388}
1389
1390static void kickoff_ul_wakeup_func(struct work_struct *work)
1391{
1392 read_lock(&ul_wakeup_lock);
1393 if (!bam_is_connected) {
1394 read_unlock(&ul_wakeup_lock);
1395 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001396 if (unlikely(in_global_reset == 1))
1397 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001398 read_lock(&ul_wakeup_lock);
1399 ul_packet_written = 1;
1400 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1401 }
1402 read_unlock(&ul_wakeup_lock);
1403}
1404
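/*
 * Note that an uplink packet is pending and, if the BAM is not currently
 * connected, queue the wakeup work.  Returns whether the uplink is already
 * connected.
 */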
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001405int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001406{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001407 int is_connected;
1408
1409 read_lock(&ul_wakeup_lock);
1410 ul_packet_written = 1;
1411 is_connected = bam_is_connected;
1412 if (!is_connected)
1413 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1414 read_unlock(&ul_wakeup_lock);
1415
1416 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001417}
1418
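/*
 * Record the local uplink power vote and signal it to the A2 by setting or
 * clearing SMSM_A2_POWER_CONTROL in the apps SMSM state.
 */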
Eric Holmberg878923a2012-01-10 14:28:19 -07001419static void power_vote(int vote)
1420{
1421 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1422 bam_dmux_uplink_vote, vote);
1423
1424 if (bam_dmux_uplink_vote == vote)
1425 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1426
1427 bam_dmux_uplink_vote = vote;
1428 if (vote)
1429 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1430 else
1431 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1432}
1433
Eric Holmberg454d9da2012-01-12 09:37:14 -07001434/*
1435	 * @note: Must be called with ul_wakeup_lock write-locked.
1436 */
1437static inline void ul_powerdown(void)
1438{
1439 bam_dmux_log("%s: powerdown\n", __func__);
1440 verify_tx_queue_is_empty(__func__);
1441
1442 if (a2_pc_disabled) {
1443 wait_for_dfab = 1;
1444 INIT_COMPLETION(dfab_unvote_completion);
1445 release_wakelock();
1446 } else {
1447 wait_for_ack = 1;
1448 INIT_COMPLETION(ul_wakeup_ack_completion);
1449 power_vote(0);
1450 }
1451 bam_is_connected = 0;
1452 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1453}
1454
1455static inline void ul_powerdown_finish(void)
1456{
1457 if (a2_pc_disabled && wait_for_dfab) {
1458 unvote_dfab();
1459 complete_all(&dfab_unvote_completion);
1460 wait_for_dfab = 0;
1461 }
1462}
1463
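/*
 * Note on the two-stage powerdown above: ul_powerdown() runs with
 * ul_wakeup_lock write-held (see its callers), so it only updates state and
 * re-arms completions.  ul_powerdown_finish() is invoked by the same paths
 * only after the rwlock has been dropped, because it ends up in
 * unvote_dfab(), which takes the dfab_status_lock mutex and may sleep -
 * something that is not permitted while holding the rwlock.
 */
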
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001464/*
1465 * Votes for UL power and returns current power state.
1466 *
1467 * @returns true if currently connected
1468 */
1469int msm_bam_dmux_ul_power_vote(void)
1470{
1471 int is_connected;
1472
1473 read_lock(&ul_wakeup_lock);
1474 atomic_inc(&ul_ondemand_vote);
1475 is_connected = bam_is_connected;
1476 if (!is_connected)
1477 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1478 read_unlock(&ul_wakeup_lock);
1479
1480 return is_connected;
1481}
1482
1483/*
1484 * Unvotes for UL power.
1485 *
1486 * @returns true if vote count is 0 (UL shutdown possible)
1487 */
1488int msm_bam_dmux_ul_power_unvote(void)
1489{
1490 int vote;
1491
1492 read_lock(&ul_wakeup_lock);
1493 vote = atomic_dec_return(&ul_ondemand_vote);
1494	if (unlikely(vote < 0))
1495 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1496 read_unlock(&ul_wakeup_lock);
1497
1498 return vote == 0;
1499}
1500
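/*
 * Illustrative sketch only: the vote/unvote pair above is intended to be used
 * as a bracket around work that needs the uplink held up (ul_timeout() keeps
 * the link powered while ul_ondemand_vote is non-zero).  example_send_burst(),
 * struct example_client and the helpers it calls are hypothetical; only the
 * two exported vote functions are real.
 */
#if 0
static void example_send_burst(struct example_client *c)
{
	int was_up;

	/* hold the uplink up for the whole burst; also kicks off a wakeup */
	was_up = msm_bam_dmux_ul_power_vote();
	if (!was_up)
		example_wait_for_ul_connected(c);	/* hypothetical helper */

	example_transmit_queued_skbs(c);		/* hypothetical helper */

	/* let the inactivity timer power the uplink down again */
	msm_bam_dmux_ul_power_unvote();
}
#endif
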
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001501static void ul_timeout(struct work_struct *work)
1502{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001503 unsigned long flags;
1504 int ret;
1505
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001506 if (in_global_reset)
1507 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001508 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1509 if (!ret) { /* failed to grab lock, reschedule and bail */
1510 schedule_delayed_work(&ul_timeout_work,
1511 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1512 return;
1513 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001514 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001515 if (!ul_packet_written) {
1516 spin_lock(&bam_tx_pool_spinlock);
1517 if (!list_empty(&bam_tx_pool)) {
1518 struct tx_pkt_info *info;
1519
1520 info = list_first_entry(&bam_tx_pool,
1521 struct tx_pkt_info, list_node);
1522 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1523 __func__, info->ts_sec, info->ts_nsec);
1524 DBG_INC_TX_STALL_CNT();
1525 ul_packet_written = 1;
1526 }
1527 spin_unlock(&bam_tx_pool_spinlock);
1528 }
1529
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001530 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1531 bam_dmux_log("%s: pkt written %d\n",
1532 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001533 ul_packet_written = 0;
1534 schedule_delayed_work(&ul_timeout_work,
1535 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001536 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001537 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001538 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001539 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001540 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001541 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001542}
Jeff Hugo4838f412012-01-20 11:19:37 -07001543
1544static int ssrestart_check(void)
1545{
Eric Holmberg90285e22012-02-22 12:33:05 -07001546 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1547 in_global_reset = 1;
1548 if (get_restart_level() <= RESET_SOC)
1549 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1550 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001551}
1552
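/*
 * ul_wakeup() - bring the uplink back up via the SMSM handshake.
 *
 * Normal sequence (a2_pc_disabled == 0):
 *   1. wait for any previous power-down request to be acked (wait_for_ack)
 *   2. power_vote(1) sets SMSM_A2_POWER_CONTROL in the apps SMSM state
 *   3. wait for the A2 ack (ul_wakeup_ack_completion, completed from
 *      bam_dmux_smsm_ack_cb())
 *   4. wait for the BAM connection (bam_connection_completion, completed once
 *      reconnect_to_bam()/bam_init() has run)
 * Each wait is bounded to HZ; a timeout is treated as a modem failure and
 * handed to ssrestart_check().
 */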
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001553static void ul_wakeup(void)
1554{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001555 int ret;
1556
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001557 mutex_lock(&wakeup_lock);
1558 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001559 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001560 mutex_unlock(&wakeup_lock);
1561 return;
1562 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001563
Eric Holmberg006057d2012-01-11 10:10:42 -07001564 if (a2_pc_disabled) {
1565 /*
1566 * don't grab the wakelock the first time because it is
1567 * already grabbed when a2 powers on
1568 */
Jeff Hugo583a6da2012-02-03 11:37:30 -07001569 if (likely(a2_pc_disabled_wakelock_skipped))
Eric Holmberg006057d2012-01-11 10:10:42 -07001570 grab_wakelock();
1571 else
Jeff Hugo583a6da2012-02-03 11:37:30 -07001572 a2_pc_disabled_wakelock_skipped = 1;
Eric Holmberg006057d2012-01-11 10:10:42 -07001573 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001574 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001575 &dfab_unvote_completion, HZ);
1576 BUG_ON(ret == 0);
1577 }
1578 vote_dfab();
1579 schedule_delayed_work(&ul_timeout_work,
1580 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1581 bam_is_connected = 1;
1582 mutex_unlock(&wakeup_lock);
1583 return;
1584 }
1585
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001586 /*
1587	 * must wait for the previous power-down request to have been acked;
1588	 * chances are the ack already came in and this will just fall through
1589	 * instead of waiting
1590 */
1591 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001592 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001593 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001594 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001595 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001596 if (unlikely(ret == 0) && ssrestart_check()) {
1597 mutex_unlock(&wakeup_lock);
1598 bam_dmux_log("%s timeout previous ack\n", __func__);
1599 return;
1600 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001601 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001602 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001603 power_vote(1);
1604 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001605 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001606 if (unlikely(ret == 0) && ssrestart_check()) {
1607 mutex_unlock(&wakeup_lock);
1608 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1609 return;
1610 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001611 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001612 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001613 if (unlikely(ret == 0) && ssrestart_check()) {
1614 mutex_unlock(&wakeup_lock);
1615 bam_dmux_log("%s timeout power on\n", __func__);
1616 return;
1617 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001618
1619 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001620 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001621 schedule_delayed_work(&ul_timeout_work,
1622 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1623 mutex_unlock(&wakeup_lock);
1624}
1625
1626static void reconnect_to_bam(void)
1627{
1628 int i;
1629
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001630 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001631 vote_dfab();
1632 i = sps_device_reset(a2_device_handle);
1633 if (i)
1634 pr_err("%s: device reset failed rc = %d\n", __func__, i);
1635 i = sps_connect(bam_tx_pipe, &tx_connection);
1636 if (i)
1637 pr_err("%s: tx connection failed rc = %d\n", __func__, i);
1638 i = sps_connect(bam_rx_pipe, &rx_connection);
1639 if (i)
1640 pr_err("%s: rx connection failed rc = %d\n", __func__, i);
1641 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1642 if (i)
1643 pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
1644 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1645 if (i)
1646 pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001647
1648 bam_connection_is_active = 1;
1649
1650 if (polling_mode)
1651 rx_switch_to_interrupt_mode();
1652
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001653 toggle_apps_ack();
1654 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06001655 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001656}
1657
1658static void disconnect_to_bam(void)
1659{
1660 struct list_head *node;
1661 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001662 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001663
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001664 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001665
1666 /* handle disconnect during active UL */
1667 write_lock_irqsave(&ul_wakeup_lock, flags);
1668 if (bam_is_connected) {
1669 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1670 ul_powerdown();
1671 }
1672 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1673 ul_powerdown_finish();
1674
1675 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001676 INIT_COMPLETION(bam_connection_completion);
1677 sps_disconnect(bam_tx_pipe);
1678 sps_disconnect(bam_rx_pipe);
1679 unvote_dfab();
1680 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1681 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001682
1683 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001684 while (!list_empty(&bam_rx_pool)) {
1685 node = bam_rx_pool.next;
1686 list_del(node);
1687 info = container_of(node, struct rx_pkt_info, list_node);
1688 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1689 DMA_FROM_DEVICE);
1690 dev_kfree_skb_any(info->skb);
1691 kfree(info);
1692 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001693 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001694 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001695
Jeff Hugo0b13a352012-03-17 23:18:30 -06001696 if (disconnect_ack)
1697 toggle_apps_ack();
1698
Eric Holmberg878923a2012-01-10 14:28:19 -07001699 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001700}
1701
1702static void vote_dfab(void)
1703{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001704 int rc;
1705
Eric Holmberg006057d2012-01-11 10:10:42 -07001706 bam_dmux_log("%s\n", __func__);
1707 mutex_lock(&dfab_status_lock);
1708 if (dfab_is_on) {
1709 bam_dmux_log("%s: dfab is already on\n", __func__);
1710 mutex_unlock(&dfab_status_lock);
1711 return;
1712 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001713 rc = clk_prepare_enable(dfab_clk);
Jeff Hugoca0caa82011-12-05 16:05:23 -07001714 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001715 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001716 rc = clk_prepare_enable(xo_clk);
1717 if (rc)
1718 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
Eric Holmberg006057d2012-01-11 10:10:42 -07001719 dfab_is_on = 1;
1720 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001721}
1722
1723static void unvote_dfab(void)
1724{
Eric Holmberg006057d2012-01-11 10:10:42 -07001725 bam_dmux_log("%s\n", __func__);
1726 mutex_lock(&dfab_status_lock);
1727 if (!dfab_is_on) {
1728 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1729 dump_stack();
1730 mutex_unlock(&dfab_status_lock);
1731 return;
1732 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001733 clk_disable_unprepare(dfab_clk);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001734 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001735 dfab_is_on = 0;
1736 mutex_unlock(&dfab_status_lock);
1737}
1738
1739/* reference counting wrapper around wakelock */
1740static void grab_wakelock(void)
1741{
1742 unsigned long flags;
1743
1744 spin_lock_irqsave(&wakelock_reference_lock, flags);
1745 bam_dmux_log("%s: ref count = %d\n", __func__,
1746 wakelock_reference_count);
1747 if (wakelock_reference_count == 0)
1748 wake_lock(&bam_wakelock);
1749 ++wakelock_reference_count;
1750 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1751}
1752
1753static void release_wakelock(void)
1754{
1755 unsigned long flags;
1756
1757 spin_lock_irqsave(&wakelock_reference_lock, flags);
1758 if (wakelock_reference_count == 0) {
1759 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1760 dump_stack();
1761 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1762 return;
1763 }
1764 bam_dmux_log("%s: ref count = %d\n", __func__,
1765 wakelock_reference_count);
1766 --wakelock_reference_count;
1767 if (wakelock_reference_count == 0)
1768 wake_unlock(&bam_wakelock);
1769 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001770}
1771
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001772static int restart_notifier_cb(struct notifier_block *this,
1773 unsigned long code,
1774 void *data)
1775{
1776 int i;
1777 struct list_head *node;
1778 struct tx_pkt_info *info;
1779 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001780 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001781
1782 if (code != SUBSYS_AFTER_SHUTDOWN)
1783 return NOTIFY_DONE;
1784
Eric Holmberg878923a2012-01-10 14:28:19 -07001785 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001786 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001787
1788 /* Handle uplink Powerdown */
1789 write_lock_irqsave(&ul_wakeup_lock, flags);
1790 if (bam_is_connected) {
1791 ul_powerdown();
1792 wait_for_ack = 0;
1793 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001794 /*
1795	 * if the modem crashed during ul_wakeup(), power_vote is 1 and needs
1796	 * to be reset to 0; harmless if the bam_is_connected check above passed
1797 */
1798 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001799 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1800 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001801 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001802 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugo0b13a352012-03-17 23:18:30 -06001803 disconnect_ack = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001804
1805 /* Cleanup Channel States */
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001806 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1807 temp_remote_status = bam_ch_is_remote_open(i);
1808 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001809 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001810 if (bam_ch_is_local_open(i))
1811 bam_ch[i].status |= BAM_CH_IN_RESET;
1812 if (temp_remote_status) {
1813 platform_device_unregister(bam_ch[i].pdev);
1814 bam_ch[i].pdev = platform_device_alloc(
1815 bam_ch[i].name, 2);
1816 }
1817 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001818
1819 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001820 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001821 while (!list_empty(&bam_tx_pool)) {
1822 node = bam_tx_pool.next;
1823 list_del(node);
1824 info = container_of(node, struct tx_pkt_info,
1825 list_node);
1826 if (!info->is_cmd) {
1827 dma_unmap_single(NULL, info->dma_address,
1828 info->skb->len,
1829 DMA_TO_DEVICE);
1830 dev_kfree_skb_any(info->skb);
1831 } else {
1832 dma_unmap_single(NULL, info->dma_address,
1833 info->len,
1834 DMA_TO_DEVICE);
1835 kfree(info->skb);
1836 }
1837 kfree(info);
1838 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001839 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001840
Eric Holmberg878923a2012-01-10 14:28:19 -07001841 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001842 return NOTIFY_DONE;
1843}
1844
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001845static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846{
1847 u32 h;
1848 dma_addr_t dma_addr;
1849 int ret;
1850 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001851 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001853 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854 /* init BAM */
1855 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1856 if (!a2_virt_addr) {
1857 pr_err("%s: ioremap failed\n", __func__);
1858 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001859 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001860 }
1861 a2_props.phys_addr = A2_PHYS_BASE;
1862 a2_props.virt_addr = a2_virt_addr;
1863 a2_props.virt_size = A2_PHYS_SIZE;
1864 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001865 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001866 a2_props.num_pipes = A2_NUM_PIPES;
1867 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001868 if (cpu_is_msm9615())
1869 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001870 /* need to free on tear down */
1871 ret = sps_register_bam_device(&a2_props, &h);
1872 if (ret < 0) {
1873 pr_err("%s: register bam error %d\n", __func__, ret);
1874 goto register_bam_failed;
1875 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001876 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001877
1878 bam_tx_pipe = sps_alloc_endpoint();
1879 if (bam_tx_pipe == NULL) {
1880 pr_err("%s: tx alloc endpoint failed\n", __func__);
1881 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001882 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001883 }
1884 ret = sps_get_config(bam_tx_pipe, &tx_connection);
1885 if (ret) {
1886 pr_err("%s: tx get config failed %d\n", __func__, ret);
1887 goto tx_get_config_failed;
1888 }
1889
1890 tx_connection.source = SPS_DEV_HANDLE_MEM;
1891 tx_connection.src_pipe_index = 0;
1892 tx_connection.destination = h;
1893 tx_connection.dest_pipe_index = 4;
1894 tx_connection.mode = SPS_MODE_DEST;
1895 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
1896 tx_desc_mem_buf.size = 0x800; /* 2k */
1897 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
1898 &dma_addr, 0);
1899 if (tx_desc_mem_buf.base == NULL) {
1900 pr_err("%s: tx memory alloc failed\n", __func__);
1901 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001902 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001903 }
1904 tx_desc_mem_buf.phys_base = dma_addr;
1905 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
1906 tx_connection.desc = tx_desc_mem_buf;
1907 tx_connection.event_thresh = 0x10;
1908
1909 ret = sps_connect(bam_tx_pipe, &tx_connection);
1910 if (ret < 0) {
1911 pr_err("%s: tx connect error %d\n", __func__, ret);
1912 goto tx_connect_failed;
1913 }
1914
1915 bam_rx_pipe = sps_alloc_endpoint();
1916 if (bam_rx_pipe == NULL) {
1917 pr_err("%s: rx alloc endpoint failed\n", __func__);
1918 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001919 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001920 }
1921 ret = sps_get_config(bam_rx_pipe, &rx_connection);
1922 if (ret) {
1923 pr_err("%s: rx get config failed %d\n", __func__, ret);
1924 goto rx_get_config_failed;
1925 }
1926
1927 rx_connection.source = h;
1928 rx_connection.src_pipe_index = 5;
1929 rx_connection.destination = SPS_DEV_HANDLE_MEM;
1930 rx_connection.dest_pipe_index = 1;
1931 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06001932 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
1933 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934 rx_desc_mem_buf.size = 0x800; /* 2k */
1935 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
1936 &dma_addr, 0);
1937 if (rx_desc_mem_buf.base == NULL) {
1938 pr_err("%s: rx memory alloc failed\n", __func__);
1939 ret = -ENOMEM;
1940 goto rx_mem_failed;
1941 }
1942 rx_desc_mem_buf.phys_base = dma_addr;
1943 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
1944 rx_connection.desc = rx_desc_mem_buf;
1945 rx_connection.event_thresh = 0x10;
1946
1947 ret = sps_connect(bam_rx_pipe, &rx_connection);
1948 if (ret < 0) {
1949 pr_err("%s: rx connect error %d\n", __func__, ret);
1950 goto rx_connect_failed;
1951 }
1952
1953 tx_register_event.options = SPS_O_EOT;
1954 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
1955 tx_register_event.xfer_done = NULL;
1956 tx_register_event.callback = bam_mux_tx_notify;
1957 tx_register_event.user = NULL;
1958 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
1959 if (ret < 0) {
1960 pr_err("%s: tx register event error %d\n", __func__, ret);
1961 goto rx_event_reg_failed;
1962 }
1963
Jeff Hugo33dbc002011-08-25 15:52:53 -06001964 rx_register_event.options = SPS_O_EOT;
1965 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
1966 rx_register_event.xfer_done = NULL;
1967 rx_register_event.callback = bam_mux_rx_notify;
1968 rx_register_event.user = NULL;
1969 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
1970 if (ret < 0) {
1971		pr_err("%s: rx register event error %d\n", __func__, ret);
1972 goto rx_event_reg_failed;
1973 }
1974
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975 bam_mux_initialized = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001976 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001977 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001978 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06001979 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001980 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001981
1982rx_event_reg_failed:
1983 sps_disconnect(bam_rx_pipe);
1984rx_connect_failed:
1985 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
1986 rx_desc_mem_buf.phys_base);
1987rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988rx_get_config_failed:
1989 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001990rx_alloc_endpoint_failed:
1991 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001992tx_connect_failed:
1993 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
1994 tx_desc_mem_buf.phys_base);
1995tx_get_config_failed:
1996 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001997tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001998 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001999 /*
2000 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2001 * same handle below will cause a crash, so skip it if we've freed
2002 * the handle here.
2003 */
2004 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002005register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002006 if (!skip_iounmap)
2007 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002008ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002010 return ret;
2011}
2012
2013static int bam_init_fallback(void)
2014{
2015 u32 h;
2016 int ret;
2017 void *a2_virt_addr;
2018
2019 unvote_dfab();
2020 /* init BAM */
2021 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
2022 if (!a2_virt_addr) {
2023 pr_err("%s: ioremap failed\n", __func__);
2024 ret = -ENOMEM;
2025 goto ioremap_failed;
2026 }
2027 a2_props.phys_addr = A2_PHYS_BASE;
2028 a2_props.virt_addr = a2_virt_addr;
2029 a2_props.virt_size = A2_PHYS_SIZE;
2030 a2_props.irq = A2_BAM_IRQ;
2031 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2032 a2_props.num_pipes = A2_NUM_PIPES;
2033 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2034 if (cpu_is_msm9615())
2035 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2036 ret = sps_register_bam_device(&a2_props, &h);
2037 if (ret < 0) {
2038 pr_err("%s: register bam error %d\n", __func__, ret);
2039 goto register_bam_failed;
2040 }
2041 a2_device_handle = h;
Jeff Hugo2bec9772012-04-05 12:25:16 -06002042 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002043
2044 return 0;
2045
2046register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002047 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002048ioremap_failed:
2049 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002050}
Jeff Hugoade1f842011-08-03 15:53:59 -06002051
Jeff Hugoa670b762012-03-15 15:58:28 -06002052static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002053{
2054 int ret = 0;
2055
2056 ret = bam_init();
2057 if (ret) {
2058 ret = bam_init_fallback();
2059 if (ret)
2060			pr_err("%s: bam init fallback failed: %d\n",
2061 __func__, ret);
2062 }
2063}
2064
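/*
 * toggle_apps_ack() - acknowledge an A2 power-control request.
 *
 * The ack is a toggle of SMSM_A2_POWER_CONTROL_ACK rather than a plain
 * set/clear: each call inverts the bit, so consecutive power-up and
 * power-down requests each produce a distinct state change for the remote
 * side to observe, even when they arrive back to back.
 */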
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002065static void toggle_apps_ack(void)
2066{
2067 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002068
2069 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2070 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002071 smsm_change_state(SMSM_APPS_STATE,
2072 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2073 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2074 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002075 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002076}
2077
Jeff Hugoade1f842011-08-03 15:53:59 -06002078static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2079{
Eric Holmberg878923a2012-01-10 14:28:19 -07002080 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002081 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002082 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2083 new_state);
2084
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002085 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002086 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002087 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002088 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002089 } else if (bam_mux_initialized &&
2090 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002091 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002092 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002093 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002094 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002095 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002096 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002097 if (cpu_is_msm9615())
2098 msm9615_bam_init();
2099 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002100 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002101 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002102 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002103 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002104 }
Jeff Hugoade1f842011-08-03 15:53:59 -06002105
2106}
2107
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002108static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2109 uint32_t new_state)
2110{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002111 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002112 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2113 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002114 complete_all(&ul_wakeup_ack_completion);
2115}
2116
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117static int bam_dmux_probe(struct platform_device *pdev)
2118{
2119 int rc;
2120
2121 DBG("%s probe called\n", __func__);
2122 if (bam_mux_initialized)
2123 return 0;
2124
Stephen Boyd69d35e32012-02-14 15:33:30 -08002125 xo_clk = clk_get(&pdev->dev, "xo");
2126 if (IS_ERR(xo_clk)) {
2127 pr_err("%s: did not get xo clock\n", __func__);
2128 return PTR_ERR(xo_clk);
2129 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002130 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002131 if (IS_ERR(dfab_clk)) {
2132 pr_err("%s: did not get dfab clock\n", __func__);
2133 return -EFAULT;
2134 }
2135
2136 rc = clk_set_rate(dfab_clk, 64000000);
2137 if (rc)
2138 pr_err("%s: unable to set dfab clock rate\n", __func__);
2139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
2141 if (!bam_mux_rx_workqueue)
2142 return -ENOMEM;
2143
2144 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2145 if (!bam_mux_tx_workqueue) {
2146 destroy_workqueue(bam_mux_rx_workqueue);
2147 return -ENOMEM;
2148 }
2149
Jeff Hugo7960abd2011-08-02 15:39:38 -06002150 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002151 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002152 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2153 "bam_dmux_ch_%d", rc);
2154 /* bus 2, ie a2 stream 2 */
2155 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2156 if (!bam_ch[rc].pdev) {
2157 pr_err("%s: platform device alloc failed\n", __func__);
2158 destroy_workqueue(bam_mux_rx_workqueue);
2159 destroy_workqueue(bam_mux_tx_workqueue);
2160 return -ENOMEM;
2161 }
2162 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002164 init_completion(&ul_wakeup_ack_completion);
2165 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002166 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002167 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002168 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002169
Jeff Hugoade1f842011-08-03 15:53:59 -06002170 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2171 bam_dmux_smsm_cb, NULL);
2172
2173 if (rc) {
2174 destroy_workqueue(bam_mux_rx_workqueue);
2175 destroy_workqueue(bam_mux_tx_workqueue);
2176 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2177 return -ENOMEM;
2178 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002179
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002180 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2181 bam_dmux_smsm_ack_cb, NULL);
2182
2183 if (rc) {
2184 destroy_workqueue(bam_mux_rx_workqueue);
2185 destroy_workqueue(bam_mux_tx_workqueue);
2186 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2187 SMSM_A2_POWER_CONTROL,
2188 bam_dmux_smsm_cb, NULL);
2189 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2190 rc);
2191 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2192 platform_device_put(bam_ch[rc].pdev);
2193 return -ENOMEM;
2194 }
2195
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002196 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2197 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2198
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199 return 0;
2200}
2201
2202static struct platform_driver bam_dmux_driver = {
2203 .probe = bam_dmux_probe,
2204 .driver = {
2205 .name = "BAM_RMNT",
2206 .owner = THIS_MODULE,
2207 },
2208};
2209
2210static int __init bam_dmux_init(void)
2211{
Eric Holmberg878923a2012-01-10 14:28:19 -07002212 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002213#ifdef CONFIG_DEBUG_FS
2214 struct dentry *dent;
2215
2216 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002217 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002219 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2220 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002221 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002222 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002223#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002224 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2225 if (ret) {
2226 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2227 bam_dmux_state_logging_disabled = 1;
2228 }
2229
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002230 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002231 return platform_driver_register(&bam_dmux_driver);
2232}
2233
Jeff Hugoade1f842011-08-03 15:53:59 -06002234late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002235MODULE_DESCRIPTION("MSM BAM DMUX");
2236MODULE_LICENSE("GPL v2");