Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * BAM DMUX module.
16 */
17
18#define DEBUG
19
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/debugfs.h>
Jeff Hugoaab7ebc2011-09-07 16:46:04 -060027#include <linux/clk.h>
Jeff Hugoae3a85e2011-12-02 17:10:18 -070028#include <linux/wakelock.h>
Eric Holmberg878923a2012-01-10 14:28:19 -070029#include <linux/kfifo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
31#include <mach/sps.h>
32#include <mach/bam_dmux.h>
Jeff Hugoade1f842011-08-03 15:53:59 -060033#include <mach/msm_smsm.h>
Jeff Hugo6e7a92a2011-10-24 05:25:13 -060034#include <mach/subsystem_notif.h>
Jeff Hugo75913c82011-12-05 15:59:01 -070035#include <mach/socinfo.h>
Jeff Hugo4838f412012-01-20 11:19:37 -070036#include <mach/subsystem_restart.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
38#define BAM_CH_LOCAL_OPEN 0x1
39#define BAM_CH_REMOTE_OPEN 0x2
Jeff Hugo6e7a92a2011-10-24 05:25:13 -060040#define BAM_CH_IN_RESET 0x4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041
42#define BAM_MUX_HDR_MAGIC_NO 0x33fc
43
Eric Holmberg006057d2012-01-11 10:10:42 -070044#define BAM_MUX_HDR_CMD_DATA 0
45#define BAM_MUX_HDR_CMD_OPEN 1
46#define BAM_MUX_HDR_CMD_CLOSE 2
47#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
48#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049
Jeff Hugo949080a2011-08-30 11:58:56 -060050#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
51#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
52#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -070054#define LOW_WATERMARK 2
55#define HIGH_WATERMARK 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056
57static int msm_bam_dmux_debug_enable;
58module_param_named(debug_enable, msm_bam_dmux_debug_enable,
59 int, S_IRUGO | S_IWUSR | S_IWGRP);
60
61#if defined(DEBUG)
62static uint32_t bam_dmux_read_cnt;
63static uint32_t bam_dmux_write_cnt;
64static uint32_t bam_dmux_write_cpy_cnt;
65static uint32_t bam_dmux_write_cpy_bytes;
Eric Holmberg2fddbcd2011-11-28 18:25:57 -070066static uint32_t bam_dmux_tx_sps_failure_cnt;
Eric Holmberg6074aba2012-01-18 17:59:44 -070067static uint32_t bam_dmux_tx_stall_cnt;
Eric Holmberg1f1255d2012-02-22 13:37:21 -070068static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
69static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
70static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72#define DBG(x...) do { \
73 if (msm_bam_dmux_debug_enable) \
74 pr_debug(x); \
75 } while (0)
76
77#define DBG_INC_READ_CNT(x) do { \
78 bam_dmux_read_cnt += (x); \
79 if (msm_bam_dmux_debug_enable) \
80 pr_debug("%s: total read bytes %u\n", \
81 __func__, bam_dmux_read_cnt); \
82 } while (0)
83
84#define DBG_INC_WRITE_CNT(x) do { \
85 bam_dmux_write_cnt += (x); \
86 if (msm_bam_dmux_debug_enable) \
87 pr_debug("%s: total written bytes %u\n", \
88 __func__, bam_dmux_write_cnt); \
89 } while (0)
90
91#define DBG_INC_WRITE_CPY(x) do { \
92 bam_dmux_write_cpy_bytes += (x); \
93 bam_dmux_write_cpy_cnt++; \
94 if (msm_bam_dmux_debug_enable) \
95 pr_debug("%s: total write copy cnt %u, bytes %u\n", \
96 __func__, bam_dmux_write_cpy_cnt, \
97 bam_dmux_write_cpy_bytes); \
98 } while (0)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -070099
100#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
101 bam_dmux_tx_sps_failure_cnt++; \
102} while (0)
103
Eric Holmberg6074aba2012-01-18 17:59:44 -0700104#define DBG_INC_TX_STALL_CNT() do { \
105 bam_dmux_tx_stall_cnt++; \
106} while (0)
107
Eric Holmberg1f1255d2012-02-22 13:37:21 -0700108#define DBG_INC_ACK_OUT_CNT() \
109 atomic_inc(&bam_dmux_ack_out_cnt)
110
111#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
112 atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)
113
114#define DBG_INC_ACK_IN_CNT() \
115 atomic_inc(&bam_dmux_ack_in_cnt)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700116#else
117#define DBG(x...) do { } while (0)
118#define DBG_INC_READ_CNT(x...) do { } while (0)
119#define DBG_INC_WRITE_CNT(x...) do { } while (0)
120#define DBG_INC_WRITE_CPY(x...) do { } while (0)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700121#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
Eric Holmberg6074aba2012-01-18 17:59:44 -0700122#define DBG_INC_TX_STALL_CNT() do { } while (0)
Eric Holmberg1f1255d2012-02-22 13:37:21 -0700123#define DBG_INC_ACK_OUT_CNT() do { } while (0)
124#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
125 do { } while (0)
126#define DBG_INC_ACK_IN_CNT() do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127#endif
128
129struct bam_ch_info {
130 uint32_t status;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600131 void (*notify)(void *, int, unsigned long);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132 void *priv;
133 spinlock_t lock;
Jeff Hugo7960abd2011-08-02 15:39:38 -0600134 struct platform_device *pdev;
135 char name[BAM_DMUX_CH_NAME_MAX_LEN];
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700136 int num_tx_pkts;
137 int use_wm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138};
139
140struct tx_pkt_info {
141 struct sk_buff *skb;
142 dma_addr_t dma_address;
143 char is_cmd;
144 uint32_t len;
145 struct work_struct work;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600146 struct list_head list_node;
Eric Holmberg878923a2012-01-10 14:28:19 -0700147 unsigned ts_sec;
148 unsigned long ts_nsec;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149};
150
151struct rx_pkt_info {
152 struct sk_buff *skb;
153 dma_addr_t dma_address;
154 struct work_struct work;
Jeff Hugo949080a2011-08-30 11:58:56 -0600155 struct list_head list_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156};
157
158#define A2_NUM_PIPES 6
159#define A2_SUMMING_THRESHOLD 4096
160#define A2_DEFAULT_DESCRIPTORS 32
161#define A2_PHYS_BASE 0x124C2000
162#define A2_PHYS_SIZE 0x2000
163#define BUFFER_SIZE 2048
164#define NUM_BUFFERS 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700165static struct sps_bam_props a2_props;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600166static u32 a2_device_handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700167static struct sps_pipe *bam_tx_pipe;
168static struct sps_pipe *bam_rx_pipe;
169static struct sps_connect tx_connection;
170static struct sps_connect rx_connection;
171static struct sps_mem_buffer tx_desc_mem_buf;
172static struct sps_mem_buffer rx_desc_mem_buf;
173static struct sps_register_event tx_register_event;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600174static struct sps_register_event rx_register_event;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175
176static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
177static int bam_mux_initialized;
178
Jeff Hugo949080a2011-08-30 11:58:56 -0600179static int polling_mode;
180
181static LIST_HEAD(bam_rx_pool);
Jeff Hugoc9749932011-11-02 17:50:40 -0600182static DEFINE_MUTEX(bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700183static int bam_rx_pool_len;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600184static LIST_HEAD(bam_tx_pool);
Jeff Hugoc9749932011-11-02 17:50:40 -0600185static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600186
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700187struct bam_mux_hdr {
188 uint16_t magic_num;
189 uint8_t reserved;
190 uint8_t cmd;
191 uint8_t pad_len;
192 uint8_t ch_id;
193 uint16_t pkt_len;
194};
195
Jeff Hugod98b1082011-10-24 10:30:23 -0600196static void notify_all(int event, unsigned long data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197static void bam_mux_write_done(struct work_struct *work);
198static void handle_bam_mux_cmd(struct work_struct *work);
Jeff Hugo949080a2011-08-30 11:58:56 -0600199static void rx_timer_work_func(struct work_struct *work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700200
Jeff Hugo949080a2011-08-30 11:58:56 -0600201static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202
203static struct workqueue_struct *bam_mux_rx_workqueue;
204static struct workqueue_struct *bam_mux_tx_workqueue;
205
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600206/* A2 power collapse */
207#define UL_TIMEOUT_DELAY 1000 /* in ms */
Jeff Hugo0b13a352012-03-17 23:18:30 -0600208#define ENABLE_DISCONNECT_ACK 0x1
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600209static void toggle_apps_ack(void);
210static void reconnect_to_bam(void);
211static void disconnect_to_bam(void);
212static void ul_wakeup(void);
213static void ul_timeout(struct work_struct *work);
214static void vote_dfab(void);
215static void unvote_dfab(void);
Jeff Hugod98b1082011-10-24 10:30:23 -0600216static void kickoff_ul_wakeup_func(struct work_struct *work);
Eric Holmberg006057d2012-01-11 10:10:42 -0700217static void grab_wakelock(void);
218static void release_wakelock(void);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600219
220static int bam_is_connected;
221static DEFINE_MUTEX(wakeup_lock);
222static struct completion ul_wakeup_ack_completion;
223static struct completion bam_connection_completion;
224static struct delayed_work ul_timeout_work;
225static int ul_packet_written;
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700226static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
Stephen Boyd69d35e32012-02-14 15:33:30 -0800227static struct clk *dfab_clk, *xo_clk;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600228static DEFINE_RWLOCK(ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600229static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600230static int bam_connection_is_active;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -0700231static int wait_for_ack;
Jeff Hugoae3a85e2011-12-02 17:10:18 -0700232static struct wake_lock bam_wakelock;
Eric Holmberg006057d2012-01-11 10:10:42 -0700233static int a2_pc_disabled;
234static DEFINE_MUTEX(dfab_status_lock);
235static int dfab_is_on;
236static int wait_for_dfab;
237static struct completion dfab_unvote_completion;
238static DEFINE_SPINLOCK(wakelock_reference_lock);
239static int wakelock_reference_count;
Jeff Hugo583a6da2012-02-03 11:37:30 -0700240static int a2_pc_disabled_wakelock_skipped;
Jeff Hugo0b13a352012-03-17 23:18:30 -0600241static int disconnect_ack;
Jeff Hugocb798022012-04-09 14:55:40 -0600242static LIST_HEAD(bam_other_notify_funcs);
243
244struct outside_notify_func {
245 void (*notify)(void *, int, unsigned long);
246 void *priv;
247 struct list_head list_node;
248};
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600249/* End A2 power collapse */
250
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600251/* subsystem restart */
252static int restart_notifier_cb(struct notifier_block *this,
253 unsigned long code,
254 void *data);
255
256static struct notifier_block restart_notifier = {
257 .notifier_call = restart_notifier_cb,
258};
259static int in_global_reset;
260/* end subsystem restart */
261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262#define bam_ch_is_open(x) \
263 (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))
264
265#define bam_ch_is_local_open(x) \
266 (bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)
267
268#define bam_ch_is_remote_open(x) \
269 (bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)
270
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600271#define bam_ch_is_in_reset(x) \
272 (bam_ch[(x)].status & BAM_CH_IN_RESET)
273
Eric Holmberg878923a2012-01-10 14:28:19 -0700274#define LOG_MESSAGE_MAX_SIZE 80
275struct kfifo bam_dmux_state_log;
276static uint32_t bam_dmux_state_logging_disabled;
277static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
278static int bam_dmux_uplink_vote;
279static int bam_dmux_power_state;
280
281
282#define DMUX_LOG_KERR(fmt...) \
283do { \
284 bam_dmux_log(fmt); \
285 pr_err(fmt); \
286} while (0)
287
288/**
289 * Log a state change along with a small message.
290 *
 291 * Complete size of each message is limited to LOG_MESSAGE_MAX_SIZE bytes.
292 */
293static void bam_dmux_log(const char *fmt, ...)
294{
295 char buff[LOG_MESSAGE_MAX_SIZE];
296 unsigned long flags;
297 va_list arg_list;
298 unsigned long long t_now;
299 unsigned long nanosec_rem;
300 int len = 0;
301
302 if (bam_dmux_state_logging_disabled)
303 return;
304
305 t_now = sched_clock();
306 nanosec_rem = do_div(t_now, 1000000000U);
307
308 /*
309 * States
Eric Holmberg006057d2012-01-11 10:10:42 -0700310 * D: 1 = Power collapse disabled
Eric Holmberg878923a2012-01-10 14:28:19 -0700311 * R: 1 = in global reset
312 * P: 1 = BAM is powered up
313 * A: 1 = BAM initialized and ready for data
314 *
315 * V: 1 = Uplink vote for power
316 * U: 1 = Uplink active
317 * W: 1 = Uplink Wait-for-ack
318 * A: 1 = Uplink ACK received
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700319 * #: >=1 On-demand uplink vote
Jeff Hugo0b13a352012-03-17 23:18:30 -0600320 * D: 1 = Disconnect ACK active
Eric Holmberg878923a2012-01-10 14:28:19 -0700321 */
322 len += scnprintf(buff, sizeof(buff),
Jeff Hugo0b13a352012-03-17 23:18:30 -0600323 "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
Eric Holmberg878923a2012-01-10 14:28:19 -0700324 (unsigned)t_now, nanosec_rem,
Eric Holmberg006057d2012-01-11 10:10:42 -0700325 a2_pc_disabled ? 'D' : 'd',
Eric Holmberg878923a2012-01-10 14:28:19 -0700326 in_global_reset ? 'R' : 'r',
327 bam_dmux_power_state ? 'P' : 'p',
328 bam_connection_is_active ? 'A' : 'a',
329 bam_dmux_uplink_vote ? 'V' : 'v',
330 bam_is_connected ? 'U' : 'u',
331 wait_for_ack ? 'W' : 'w',
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700332 ul_wakeup_ack_completion.done ? 'A' : 'a',
Jeff Hugo0b13a352012-03-17 23:18:30 -0600333 atomic_read(&ul_ondemand_vote),
334 disconnect_ack ? 'D' : 'd'
Eric Holmberg878923a2012-01-10 14:28:19 -0700335 );
336
337 va_start(arg_list, fmt);
338 len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
339 va_end(arg_list);
340 memset(buff + len, 0x0, sizeof(buff) - len);
341
342 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
343 if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
344 char junk[LOG_MESSAGE_MAX_SIZE];
345 int ret;
346
347 ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
348 if (ret != LOG_MESSAGE_MAX_SIZE) {
349 pr_err("%s: unable to empty log %d\n", __func__, ret);
350 spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
351 flags);
352 return;
353 }
354 }
355 kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
356 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
357}
358
359static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
360{
361 unsigned long long t_now;
362
363 t_now = sched_clock();
364 pkt->ts_nsec = do_div(t_now, 1000000000U);
365 pkt->ts_sec = (unsigned)t_now;
366}
367
368static inline void verify_tx_queue_is_empty(const char *func)
369{
370 unsigned long flags;
371 struct tx_pkt_info *info;
372 int reported = 0;
373
374 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
375 list_for_each_entry(info, &bam_tx_pool, list_node) {
376 if (!reported) {
Eric Holmberg454d9da2012-01-12 09:37:14 -0700377 bam_dmux_log("%s: tx pool not empty\n", func);
378 if (!in_global_reset)
379 pr_err("%s: tx pool not empty\n", func);
Eric Holmberg878923a2012-01-10 14:28:19 -0700380 reported = 1;
381 }
Eric Holmberg454d9da2012-01-12 09:37:14 -0700382 bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
383 &info->list_node, info->ts_sec, info->ts_nsec);
384 if (!in_global_reset)
385 pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
386 &info->list_node, info->ts_sec, info->ts_nsec);
Eric Holmberg878923a2012-01-10 14:28:19 -0700387 }
388 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
389}
390
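/*
 * Refill the RX packet pool up to NUM_BUFFERS: allocate an skb and a DMA
 * mapping for each slot and queue it on the SPS RX pipe.  If the pool ever
 * drains to zero and cannot be refilled, the condition is treated as fatal
 * and a global reset is flagged.
 */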
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700391static void queue_rx(void)
392{
393 void *ptr;
394 struct rx_pkt_info *info;
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700395 int ret;
396 int rx_len_cached;
Jeff Hugo949080a2011-08-30 11:58:56 -0600397
Jeff Hugoc9749932011-11-02 17:50:40 -0600398 mutex_lock(&bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700399 rx_len_cached = bam_rx_pool_len;
Jeff Hugoc9749932011-11-02 17:50:40 -0600400 mutex_unlock(&bam_rx_pool_mutexlock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600401
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700402 while (rx_len_cached < NUM_BUFFERS) {
403 if (in_global_reset)
404 goto fail;
405
406 info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
407 if (!info) {
408 pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
409 goto fail;
410 }
411
412 INIT_WORK(&info->work, handle_bam_mux_cmd);
413
414 info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
415 if (info->skb == NULL) {
416 DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
417 goto fail_info;
418 }
419 ptr = skb_put(info->skb, BUFFER_SIZE);
420
421 info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
422 DMA_FROM_DEVICE);
423 if (info->dma_address == 0 || info->dma_address == ~0) {
424 DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
425 __func__, (void *)info->dma_address, ptr);
426 goto fail_skb;
427 }
428
429 mutex_lock(&bam_rx_pool_mutexlock);
430 list_add_tail(&info->list_node, &bam_rx_pool);
431 rx_len_cached = ++bam_rx_pool_len;
432 mutex_unlock(&bam_rx_pool_mutexlock);
433
434 ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
435 BUFFER_SIZE, info,
436 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
437
438 if (ret) {
439 DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
440 __func__, ret);
441 goto fail_transfer;
442 }
443 }
444 return;
445
446fail_transfer:
447 mutex_lock(&bam_rx_pool_mutexlock);
448 list_del(&info->list_node);
449 --bam_rx_pool_len;
450 rx_len_cached = bam_rx_pool_len;
451 mutex_unlock(&bam_rx_pool_mutexlock);
452
453 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
454 DMA_FROM_DEVICE);
455
456fail_skb:
457 dev_kfree_skb_any(info->skb);
458
459fail_info:
460 kfree(info);
461
462fail:
463 if (rx_len_cached == 0) {
464 DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
465 in_global_reset = 1;
466 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467}
468
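/*
 * Strip the mux header from a received data packet and hand the skb to the
 * channel's notify callback (or free it if no client is registered), then
 * queue a replacement RX buffer.
 */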
469static void bam_mux_process_data(struct sk_buff *rx_skb)
470{
471 unsigned long flags;
472 struct bam_mux_hdr *rx_hdr;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600473 unsigned long event_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700474
475 rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
476
477 rx_skb->data = (unsigned char *)(rx_hdr + 1);
478 rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
479 rx_skb->len = rx_hdr->pkt_len;
Jeff Hugoee88f672011-10-04 17:14:52 -0600480 rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600482 event_data = (unsigned long)(rx_skb);
483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700484 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600485 if (bam_ch[rx_hdr->ch_id].notify)
486 bam_ch[rx_hdr->ch_id].notify(
487 bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
488 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700489 else
490 dev_kfree_skb_any(rx_skb);
491 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
492
493 queue_rx();
494}
495
Eric Holmberg006057d2012-01-11 10:10:42 -0700496static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
497{
498 unsigned long flags;
499 int ret;
500
501 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
502 bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
503 bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
504 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
505 queue_rx();
506 ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
507 if (ret)
508 pr_err("%s: platform_device_add() error: %d\n",
509 __func__, ret);
510}
511
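/*
 * RX work function: unmap the completed RX buffer, validate the mux header,
 * and dispatch on the command type (data, open, open-without-power-collapse,
 * close).  Malformed headers and invalid channel ids are logged and dropped.
 */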
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700512static void handle_bam_mux_cmd(struct work_struct *work)
513{
514 unsigned long flags;
515 struct bam_mux_hdr *rx_hdr;
516 struct rx_pkt_info *info;
517 struct sk_buff *rx_skb;
518
519 info = container_of(work, struct rx_pkt_info, work);
520 rx_skb = info->skb;
Jeff Hugo949080a2011-08-30 11:58:56 -0600521 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522 kfree(info);
523
524 rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
525
526 DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
527 DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
528 rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
529 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
530 if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700531 DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
532 " reserved %d cmd %d"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 " pad %d ch %d len %d\n", __func__,
534 rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
535 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
536 dev_kfree_skb_any(rx_skb);
537 queue_rx();
538 return;
539 }
Eric Holmberg9ff40a52011-11-17 19:17:00 -0700540
541 if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700542 DMUX_LOG_KERR("%s: dropping invalid LCID %d"
543 " reserved %d cmd %d"
Eric Holmberg9ff40a52011-11-17 19:17:00 -0700544 " pad %d ch %d len %d\n", __func__,
545 rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
546 rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
547 dev_kfree_skb_any(rx_skb);
548 queue_rx();
549 return;
550 }
551
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700552 switch (rx_hdr->cmd) {
553 case BAM_MUX_HDR_CMD_DATA:
554 DBG_INC_READ_CNT(rx_hdr->pkt_len);
555 bam_mux_process_data(rx_skb);
556 break;
557 case BAM_MUX_HDR_CMD_OPEN:
Eric Holmberg006057d2012-01-11 10:10:42 -0700558 bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -0700559 rx_hdr->ch_id);
Eric Holmberg006057d2012-01-11 10:10:42 -0700560 handle_bam_mux_cmd_open(rx_hdr);
Jeff Hugo0b13a352012-03-17 23:18:30 -0600561 if (rx_hdr->reserved & ENABLE_DISCONNECT_ACK) {
 562 bam_dmux_log("%s: activating disconnect ack\n", __func__);
563 disconnect_ack = 1;
564 }
Eric Holmberg006057d2012-01-11 10:10:42 -0700565 dev_kfree_skb_any(rx_skb);
566 break;
567 case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
568 bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
569 rx_hdr->ch_id);
570
571 if (!a2_pc_disabled) {
572 a2_pc_disabled = 1;
Jeff Hugo322179f2012-02-29 10:52:34 -0700573 ul_wakeup();
Eric Holmberg006057d2012-01-11 10:10:42 -0700574 }
575
576 handle_bam_mux_cmd_open(rx_hdr);
Eric Holmberge779dba2011-11-04 18:22:01 -0600577 dev_kfree_skb_any(rx_skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 break;
579 case BAM_MUX_HDR_CMD_CLOSE:
580 /* probably should drop pending write */
Eric Holmberg878923a2012-01-10 14:28:19 -0700581 bam_dmux_log("%s: closing cid %d\n", __func__,
582 rx_hdr->ch_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700583 spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
584 bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
585 spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586 queue_rx();
Jeff Hugo7960abd2011-08-02 15:39:38 -0600587 platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
588 bam_ch[rx_hdr->ch_id].pdev =
589 platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
590 if (!bam_ch[rx_hdr->ch_id].pdev)
591 pr_err("%s: platform_device_alloc failed\n", __func__);
Eric Holmberge779dba2011-11-04 18:22:01 -0600592 dev_kfree_skb_any(rx_skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700593 break;
594 default:
Eric Holmberg878923a2012-01-10 14:28:19 -0700595 DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
596 " reserved %d cmd %d pad %d ch %d len %d\n",
597 __func__, rx_hdr->magic_num, rx_hdr->reserved,
598 rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
599 rx_hdr->pkt_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700600 dev_kfree_skb_any(rx_skb);
601 queue_rx();
602 return;
603 }
604}
605
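/*
 * Queue a command packet (e.g. an OPEN or CLOSE header) on the TX pipe.
 * The buffer is DMA mapped, tracked in bam_tx_pool, and freed from
 * bam_mux_write_done() once the transfer completes.
 */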
606static int bam_mux_write_cmd(void *data, uint32_t len)
607{
608 int rc;
609 struct tx_pkt_info *pkt;
610 dma_addr_t dma_address;
Jeff Hugo626303bf2011-11-21 11:43:28 -0700611 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700612
Eric Holmbergd83cd2b2011-11-04 15:54:17 -0600613 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700614 if (pkt == NULL) {
615 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
616 rc = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617 return rc;
618 }
619
620 dma_address = dma_map_single(NULL, data, len,
621 DMA_TO_DEVICE);
622 if (!dma_address) {
623 pr_err("%s: dma_map_single() failed\n", __func__);
Jeff Hugo96cb7482011-12-07 13:28:31 -0700624 kfree(pkt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 rc = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 return rc;
627 }
628 pkt->skb = (struct sk_buff *)(data);
629 pkt->len = len;
630 pkt->dma_address = dma_address;
631 pkt->is_cmd = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -0700632 set_tx_timestamp(pkt);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600633 INIT_WORK(&pkt->work, bam_mux_write_done);
Jeff Hugo626303bf2011-11-21 11:43:28 -0700634 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600635 list_add_tail(&pkt->list_node, &bam_tx_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700636 rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
637 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600638 if (rc) {
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700639 DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
640 __func__, rc);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600641 list_del(&pkt->list_node);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700642 DBG_INC_TX_SPS_FAILURE_CNT();
Jeff Hugo626303bf2011-11-21 11:43:28 -0700643 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700644 dma_unmap_single(NULL, pkt->dma_address,
645 pkt->len,
646 DMA_TO_DEVICE);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600647 kfree(pkt);
Jeff Hugobb6da952012-01-16 15:02:42 -0700648 } else {
649 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600650 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600652 ul_packet_written = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653 return rc;
654}
655
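/*
 * TX completion work.  Transfers must complete in order, so a mismatch
 * between the completed packet and the head of bam_tx_pool is fatal.
 * Command buffers are simply freed; for data packets the per-channel
 * in-flight count is decremented and the client is notified with
 * BAM_DMUX_WRITE_DONE.
 */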
656static void bam_mux_write_done(struct work_struct *work)
657{
658 struct sk_buff *skb;
659 struct bam_mux_hdr *hdr;
660 struct tx_pkt_info *info;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700661 struct tx_pkt_info *info_expected;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600662 unsigned long event_data;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700663 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700664
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600665 if (in_global_reset)
666 return;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700668 info = container_of(work, struct tx_pkt_info, work);
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700669
670 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
671 info_expected = list_first_entry(&bam_tx_pool,
672 struct tx_pkt_info, list_node);
673 if (unlikely(info != info_expected)) {
Eric Holmberg878923a2012-01-10 14:28:19 -0700674 struct tx_pkt_info *errant_pkt;
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700675
Eric Holmberg878923a2012-01-10 14:28:19 -0700676 DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
677 " list_node=%p, ts=%u.%09lu\n",
678 __func__, bam_tx_pool.next, &info->list_node,
679 info->ts_sec, info->ts_nsec
680 );
681
682 list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
683 DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
684 &errant_pkt->list_node, errant_pkt->ts_sec,
685 errant_pkt->ts_nsec);
686
687 }
Eric Holmberg1cde7a62011-12-19 18:34:01 -0700688 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
689 BUG();
690 }
691 list_del(&info->list_node);
692 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
693
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600694 if (info->is_cmd) {
695 kfree(info->skb);
696 kfree(info);
697 return;
698 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699 skb = info->skb;
700 kfree(info);
701 hdr = (struct bam_mux_hdr *)skb->data;
Eric Holmberg9fdef262012-02-14 11:46:05 -0700702 DBG_INC_WRITE_CNT(skb->len);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600703 event_data = (unsigned long)(skb);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700704 spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
705 bam_ch[hdr->ch_id].num_tx_pkts--;
706 spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600707 if (bam_ch[hdr->ch_id].notify)
708 bam_ch[hdr->ch_id].notify(
709 bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
710 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700711 else
712 dev_kfree_skb_any(skb);
713}
714
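/*
 * Write an skb to an open channel.  Enforces the per-channel high watermark,
 * wakes the A2 if the uplink is powered down, pads the payload to a 4-byte
 * boundary, prepends the mux header and queues the result on the TX pipe.
 */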
715int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
716{
717 int rc = 0;
718 struct bam_mux_hdr *hdr;
719 unsigned long flags;
720 struct sk_buff *new_skb = NULL;
721 dma_addr_t dma_address;
722 struct tx_pkt_info *pkt;
723
724 if (id >= BAM_DMUX_NUM_CHANNELS)
725 return -EINVAL;
726 if (!skb)
727 return -EINVAL;
728 if (!bam_mux_initialized)
729 return -ENODEV;
730
731 DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
732 spin_lock_irqsave(&bam_ch[id].lock, flags);
733 if (!bam_ch_is_open(id)) {
734 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
735 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
736 return -ENODEV;
737 }
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700738
739 if (bam_ch[id].use_wm &&
740 (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
741 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
742 pr_err("%s: watermark exceeded: %d\n", __func__, id);
743 return -EAGAIN;
744 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
746
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600747 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600748 if (!bam_is_connected) {
749 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600750 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700751 if (unlikely(in_global_reset == 1))
752 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600753 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600754 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600755 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600756
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700757 /* if the skb does not have any tailroom for padding,
 758 copy the skb into a new, expanded skb */
759 if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
 760 /* revisit: dev_alloc_skb plus memcpy may be more efficient */
761 new_skb = skb_copy_expand(skb, skb_headroom(skb),
762 4 - (skb->len & 0x3), GFP_ATOMIC);
763 if (new_skb == NULL) {
764 pr_err("%s: cannot allocate skb\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600765 goto write_fail;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700766 }
767 dev_kfree_skb_any(skb);
768 skb = new_skb;
769 DBG_INC_WRITE_CPY(skb->len);
770 }
771
772 hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));
773
774 /* caller should allocate for hdr and padding
775 hdr is fine, padding is tricky */
776 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
777 hdr->cmd = BAM_MUX_HDR_CMD_DATA;
778 hdr->reserved = 0;
779 hdr->ch_id = id;
780 hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
781 if (skb->len & 0x3)
782 skb_put(skb, 4 - (skb->len & 0x3));
783
784 hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);
785
786 DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
787 __func__, skb->data, skb->tail, skb->len,
788 hdr->pkt_len, hdr->pad_len);
789
790 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
791 if (pkt == NULL) {
792 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600793 goto write_fail2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794 }
795
796 dma_address = dma_map_single(NULL, skb->data, skb->len,
797 DMA_TO_DEVICE);
798 if (!dma_address) {
799 pr_err("%s: dma_map_single() failed\n", __func__);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600800 goto write_fail3;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700801 }
802 pkt->skb = skb;
803 pkt->dma_address = dma_address;
804 pkt->is_cmd = 0;
Eric Holmberg878923a2012-01-10 14:28:19 -0700805 set_tx_timestamp(pkt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806 INIT_WORK(&pkt->work, bam_mux_write_done);
Jeff Hugo626303bf2011-11-21 11:43:28 -0700807 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600808 list_add_tail(&pkt->list_node, &bam_tx_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700809 rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
810 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600811 if (rc) {
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700812 DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
813 __func__, rc);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600814 list_del(&pkt->list_node);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -0700815 DBG_INC_TX_SPS_FAILURE_CNT();
Jeff Hugo626303bf2011-11-21 11:43:28 -0700816 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmbergb5b08e52012-01-20 14:19:00 -0700817 dma_unmap_single(NULL, pkt->dma_address,
818 pkt->skb->len, DMA_TO_DEVICE);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600819 kfree(pkt);
Jeff Hugo872bd062011-11-15 17:47:21 -0700820 if (new_skb)
821 dev_kfree_skb_any(new_skb);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700822 } else {
Jeff Hugobb6da952012-01-16 15:02:42 -0700823 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700824 spin_lock_irqsave(&bam_ch[id].lock, flags);
825 bam_ch[id].num_tx_pkts++;
826 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
Jeff Hugo7b80c802011-11-04 16:12:20 -0600827 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600828 ul_packet_written = 1;
829 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700830 return rc;
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600831
832write_fail3:
833 kfree(pkt);
834write_fail2:
835 if (new_skb)
836 dev_kfree_skb_any(new_skb);
837write_fail:
838 read_unlock(&ul_wakeup_lock);
839 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700840}
841
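/*
 * Open the local side of a channel.  The remote side must already have
 * opened the channel; an OPEN command is then sent to the A2 so traffic can
 * flow on this channel id.
 */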
842int msm_bam_dmux_open(uint32_t id, void *priv,
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600843 void (*notify)(void *, int, unsigned long))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844{
845 struct bam_mux_hdr *hdr;
846 unsigned long flags;
847 int rc = 0;
848
849 DBG("%s: opening ch %d\n", __func__, id);
Eric Holmberg5d775432011-11-09 10:23:35 -0700850 if (!bam_mux_initialized) {
 851 DBG("%s: not initialized\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852 return -ENODEV;
Eric Holmberg5d775432011-11-09 10:23:35 -0700853 }
854 if (id >= BAM_DMUX_NUM_CHANNELS) {
855 pr_err("%s: invalid channel id %d\n", __func__, id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700856 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700857 }
858 if (notify == NULL) {
859 pr_err("%s: notify function is NULL\n", __func__);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600860 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700861 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700862
863 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
864 if (hdr == NULL) {
865 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
866 return -ENOMEM;
867 }
868 spin_lock_irqsave(&bam_ch[id].lock, flags);
869 if (bam_ch_is_open(id)) {
870 DBG("%s: Already opened %d\n", __func__, id);
871 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
872 kfree(hdr);
873 goto open_done;
874 }
875 if (!bam_ch_is_remote_open(id)) {
876 DBG("%s: Remote not open; ch: %d\n", __func__, id);
877 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
878 kfree(hdr);
Eric Holmberg5d775432011-11-09 10:23:35 -0700879 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880 }
881
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600882 bam_ch[id].notify = notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 bam_ch[id].priv = priv;
884 bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700885 bam_ch[id].num_tx_pkts = 0;
886 bam_ch[id].use_wm = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
888
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600889 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600890 if (!bam_is_connected) {
891 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600892 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700893 if (unlikely(in_global_reset == 1))
894 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600895 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600896 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600897 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600898
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700899 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
900 hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
901 hdr->reserved = 0;
902 hdr->ch_id = id;
903 hdr->pkt_len = 0;
904 hdr->pad_len = 0;
905
906 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600907 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700908
909open_done:
910 DBG("%s: opened ch %d\n", __func__, id);
911 return rc;
912}
913
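/*
 * Close the local side of a channel and send a CLOSE command to the A2,
 * unless the channel is being torn down as part of a subsystem reset.
 */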
914int msm_bam_dmux_close(uint32_t id)
915{
916 struct bam_mux_hdr *hdr;
917 unsigned long flags;
918 int rc;
919
920 if (id >= BAM_DMUX_NUM_CHANNELS)
921 return -EINVAL;
922 DBG("%s: closing ch %d\n", __func__, id);
923 if (!bam_mux_initialized)
924 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700925
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600926 read_lock(&ul_wakeup_lock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600927 if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
Jeff Hugo061ce672011-10-21 17:15:32 -0600928 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600929 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -0700930 if (unlikely(in_global_reset == 1))
931 return -EFAULT;
Jeff Hugo061ce672011-10-21 17:15:32 -0600932 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600933 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600934 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600935
Jeff Hugo061ce672011-10-21 17:15:32 -0600936 spin_lock_irqsave(&bam_ch[id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600937 bam_ch[id].notify = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700938 bam_ch[id].priv = NULL;
939 bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
940 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
941
Jeff Hugo6e7a92a2011-10-24 05:25:13 -0600942 if (bam_ch_is_in_reset(id)) {
943 read_unlock(&ul_wakeup_lock);
944 bam_ch[id].status &= ~BAM_CH_IN_RESET;
945 return 0;
946 }
947
Jeff Hugobb5802f2011-11-02 17:10:29 -0600948 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949 if (hdr == NULL) {
950 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
Jeff Hugoc6af54d2011-11-02 17:00:27 -0600951 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700952 return -ENOMEM;
953 }
954 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
955 hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
956 hdr->reserved = 0;
957 hdr->ch_id = id;
958 hdr->pkt_len = 0;
959 hdr->pad_len = 0;
960
961 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600962 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700963
964 DBG("%s: closed ch %d\n", __func__, id);
965 return rc;
966}
967
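/*
 * Flow-control helpers: msm_bam_dmux_is_ch_full() and msm_bam_dmux_is_ch_low()
 * switch the channel to watermark-based accounting and report whether the
 * number of in-flight TX packets is above the high or at/below the low
 * watermark.
 */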
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700968int msm_bam_dmux_is_ch_full(uint32_t id)
969{
970 unsigned long flags;
971 int ret;
972
973 if (id >= BAM_DMUX_NUM_CHANNELS)
974 return -EINVAL;
975
976 spin_lock_irqsave(&bam_ch[id].lock, flags);
977 bam_ch[id].use_wm = 1;
978 ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
979 DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
980 id, bam_ch[id].num_tx_pkts, ret);
981 if (!bam_ch_is_local_open(id)) {
982 ret = -ENODEV;
983 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
984 }
985 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
986
987 return ret;
988}
989
990int msm_bam_dmux_is_ch_low(uint32_t id)
991{
Eric Holmberged3ca0a2012-04-09 15:44:58 -0600992 unsigned long flags;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700993 int ret;
994
995 if (id >= BAM_DMUX_NUM_CHANNELS)
996 return -EINVAL;
997
Eric Holmberged3ca0a2012-04-09 15:44:58 -0600998 spin_lock_irqsave(&bam_ch[id].lock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700999 bam_ch[id].use_wm = 1;
1000 ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
1001 DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
1002 id, bam_ch[id].num_tx_pkts, ret);
1003 if (!bam_ch_is_local_open(id)) {
1004 ret = -ENODEV;
1005 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
1006 }
Eric Holmberged3ca0a2012-04-09 15:44:58 -06001007 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001008
1009 return ret;
1010}
1011
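/*
 * Leave RX polling mode: re-register for EOT interrupts, release the
 * wakelock taken for polling, and drain any descriptors that completed
 * before interrupts were re-enabled.  On failure, polling continues.
 */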
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001012static void rx_switch_to_interrupt_mode(void)
1013{
1014 struct sps_connect cur_rx_conn;
1015 struct sps_iovec iov;
1016 struct rx_pkt_info *info;
1017 int ret;
1018
1019 /*
1020 * Attempt to enable interrupts - if this fails,
1021 * continue polling and we will retry later.
1022 */
1023 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
1024 if (ret) {
1025 pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
1026 goto fail;
1027 }
1028
1029 rx_register_event.options = SPS_O_EOT;
1030 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
1031 if (ret) {
1032 pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
1033 goto fail;
1034 }
1035
1036 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
1037 SPS_O_EOT | SPS_O_ACK_TRANSFERS;
1038 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
1039 if (ret) {
1040 pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
1041 goto fail;
1042 }
1043 polling_mode = 0;
Eric Holmberg006057d2012-01-11 10:10:42 -07001044 release_wakelock();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001045
 1046 /* handle any rx packets completed before interrupts were re-enabled */
1047 while (bam_connection_is_active && !polling_mode) {
1048 ret = sps_get_iovec(bam_rx_pipe, &iov);
1049 if (ret) {
1050 pr_err("%s: sps_get_iovec failed %d\n",
1051 __func__, ret);
1052 break;
1053 }
1054 if (iov.addr == 0)
1055 break;
1056
1057 mutex_lock(&bam_rx_pool_mutexlock);
1058 if (unlikely(list_empty(&bam_rx_pool))) {
1059 mutex_unlock(&bam_rx_pool_mutexlock);
1060 continue;
1061 }
1062 info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
1063 list_node);
1064 list_del(&info->list_node);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001065 --bam_rx_pool_len;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001066 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001067 if (info->dma_address != iov.addr)
1068 DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
1069 __func__,
1070 (void *)info->dma_address, (void *)iov.addr);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001071 handle_bam_mux_cmd(&info->work);
1072 }
1073 return;
1074
1075fail:
1076 pr_err("%s: reverting to polling\n", __func__);
1077 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
1078}
1079
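/*
 * RX polling loop: repeatedly drain completed descriptors, sleeping roughly
 * 1 ms between passes, and fall back to interrupt mode after
 * POLLING_INACTIVITY idle cycles.
 */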
Jeff Hugo949080a2011-08-30 11:58:56 -06001080static void rx_timer_work_func(struct work_struct *work)
1081{
1082 struct sps_iovec iov;
Jeff Hugo949080a2011-08-30 11:58:56 -06001083 struct rx_pkt_info *info;
1084 int inactive_cycles = 0;
1085 int ret;
Jeff Hugo949080a2011-08-30 11:58:56 -06001086
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001087 while (bam_connection_is_active) { /* timer loop */
Jeff Hugo949080a2011-08-30 11:58:56 -06001088 ++inactive_cycles;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001089 while (bam_connection_is_active) { /* deplete queue loop */
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001090 if (in_global_reset)
1091 return;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001092
1093 ret = sps_get_iovec(bam_rx_pipe, &iov);
1094 if (ret) {
1095 pr_err("%s: sps_get_iovec failed %d\n",
1096 __func__, ret);
1097 break;
1098 }
Jeff Hugo949080a2011-08-30 11:58:56 -06001099 if (iov.addr == 0)
1100 break;
1101 inactive_cycles = 0;
Jeff Hugoc9749932011-11-02 17:50:40 -06001102 mutex_lock(&bam_rx_pool_mutexlock);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001103 if (unlikely(list_empty(&bam_rx_pool))) {
1104 mutex_unlock(&bam_rx_pool_mutexlock);
1105 continue;
1106 }
1107 info = list_first_entry(&bam_rx_pool,
1108 struct rx_pkt_info, list_node);
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001109 --bam_rx_pool_len;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001110 list_del(&info->list_node);
Jeff Hugoc9749932011-11-02 17:50:40 -06001111 mutex_unlock(&bam_rx_pool_mutexlock);
Jeff Hugo949080a2011-08-30 11:58:56 -06001112 handle_bam_mux_cmd(&info->work);
1113 }
1114
1115 if (inactive_cycles == POLLING_INACTIVITY) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001116 rx_switch_to_interrupt_mode();
1117 break;
Jeff Hugo949080a2011-08-30 11:58:56 -06001118 }
1119
1120 usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
1121 }
1122}
1123
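/* SPS TX pipe callback: unmap the completed buffer (data or command) and
 * queue its write-done work. */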
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124static void bam_mux_tx_notify(struct sps_event_notify *notify)
1125{
1126 struct tx_pkt_info *pkt;
1127
1128 DBG("%s: event %d notified\n", __func__, notify->event_id);
1129
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001130 if (in_global_reset)
1131 return;
1132
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001133 switch (notify->event_id) {
1134 case SPS_EVENT_EOT:
1135 pkt = notify->data.transfer.user;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001136 if (!pkt->is_cmd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137 dma_unmap_single(NULL, pkt->dma_address,
1138 pkt->skb->len,
1139 DMA_TO_DEVICE);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001140 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001141 dma_unmap_single(NULL, pkt->dma_address,
1142 pkt->len,
1143 DMA_TO_DEVICE);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001144 queue_work(bam_mux_tx_workqueue, &pkt->work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001145 break;
1146 default:
 1147 pr_err("%s: received unexpected event id %d\n", __func__,
1148 notify->event_id);
1149 }
1150}
1151
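/*
 * SPS RX pipe callback: on the first EOT, disable pipe interrupts (switch
 * the pipe to SPS_O_POLL), grab the wakelock and queue the RX polling work.
 */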
Jeff Hugo33dbc002011-08-25 15:52:53 -06001152static void bam_mux_rx_notify(struct sps_event_notify *notify)
1153{
Jeff Hugo949080a2011-08-30 11:58:56 -06001154 int ret;
1155 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -06001156
1157 DBG("%s: event %d notified\n", __func__, notify->event_id);
1158
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001159 if (in_global_reset)
1160 return;
1161
Jeff Hugo33dbc002011-08-25 15:52:53 -06001162 switch (notify->event_id) {
1163 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -06001164 /* attempt to disable interrupts in this pipe */
1165 if (!polling_mode) {
1166 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
1167 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001168 pr_err("%s: sps_get_config() failed %d, interrupts"
1169 " not disabled\n", __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001170 break;
1171 }
Jeff Hugoa9d32ba2011-11-21 14:59:48 -07001172 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
Jeff Hugo949080a2011-08-30 11:58:56 -06001173 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
1174 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
1175 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001176 pr_err("%s: sps_set_config() failed %d, interrupts"
1177 " not disabled\n", __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001178 break;
1179 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001180 grab_wakelock();
Jeff Hugo949080a2011-08-30 11:58:56 -06001181 polling_mode = 1;
1182 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
1183 }
Jeff Hugo33dbc002011-08-25 15:52:53 -06001184 break;
1185 default:
 1186 pr_err("%s: received unexpected event id %d\n", __func__,
1187 notify->event_id);
1188 }
1189}
1190
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001191#ifdef CONFIG_DEBUG_FS
1192
1193static int debug_tbl(char *buf, int max)
1194{
1195 int i = 0;
1196 int j;
1197
1198 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1199 i += scnprintf(buf + i, max - i,
1200 "ch%02d local open=%s remote open=%s\n",
1201 j, bam_ch_is_local_open(j) ? "Y" : "N",
1202 bam_ch_is_remote_open(j) ? "Y" : "N");
1203 }
1204
1205 return i;
1206}
1207
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001208static int debug_ul_pkt_cnt(char *buf, int max)
1209{
1210 struct list_head *p;
1211 unsigned long flags;
1212 int n = 0;
1213
1214 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1215 __list_for_each(p, &bam_tx_pool) {
1216 ++n;
1217 }
1218 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1219
1220 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1221}
1222
1223static int debug_stats(char *buf, int max)
1224{
1225 int i = 0;
1226
1227 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001228 "skb read cnt: %u\n"
1229 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001230 "skb copy cnt: %u\n"
1231 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001232 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001233 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001234 "rx queue len: %d\n"
1235 "a2 ack out cnt: %d\n"
1236 "a2 ack in cnt: %d\n"
1237 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001238 bam_dmux_read_cnt,
1239 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001240 bam_dmux_write_cpy_cnt,
1241 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001242 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001243 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001244 bam_rx_pool_len,
1245 atomic_read(&bam_dmux_ack_out_cnt),
1246 atomic_read(&bam_dmux_ack_in_cnt),
1247 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001248 );
1249
1250 return i;
1251}
1252
Eric Holmberg878923a2012-01-10 14:28:19 -07001253static int debug_log(char *buff, int max, loff_t *ppos)
1254{
1255 unsigned long flags;
1256 int i = 0;
1257
1258 if (bam_dmux_state_logging_disabled) {
 1259 i += scnprintf(buff + i, max - i, "Logging disabled\n");
1260 return i;
1261 }
1262
1263 if (*ppos == 0) {
 1264 i += scnprintf(buff + i, max - i,
1265 "<DMUX> timestamp FLAGS [Message]\n"
1266 "FLAGS:\n"
Eric Holmberg006057d2012-01-11 10:10:42 -07001267 "\tD: 1 = Power collapse disabled\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001268 "\tR: 1 = in global reset\n"
1269 "\tP: 1 = BAM is powered up\n"
1270 "\tA: 1 = BAM initialized and ready for data\n"
1271 "\n"
1272 "\tV: 1 = Uplink vote for power\n"
1273 "\tU: 1 = Uplink active\n"
1274 "\tW: 1 = Uplink Wait-for-ack\n"
1275 "\tA: 1 = Uplink ACK received\n"
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001276 "\t#: >=1 On-demand uplink vote\n"
Jeff Hugo0b13a352012-03-17 23:18:30 -06001277 "\tD: 1 = Disconnect ACK active\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001278 );
1279 buff += i;
1280 }
1281
1282 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
1283 while (kfifo_len(&bam_dmux_state_log)
1284 && (i + LOG_MESSAGE_MAX_SIZE) < max) {
1285 int k_len;
1286 k_len = kfifo_out(&bam_dmux_state_log,
1287 buff, LOG_MESSAGE_MAX_SIZE);
1288 if (k_len != LOG_MESSAGE_MAX_SIZE) {
1289 pr_err("%s: retrieve failure %d\n", __func__, k_len);
1290 break;
1291 }
1292
1293 /* keep non-null portion of string and add line break */
1294 k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
1295 buff += k_len;
1296 i += k_len;
1297 if (k_len && *(buff - 1) != '\n') {
1298 *buff++ = '\n';
1299 ++i;
1300 }
1301 }
1302 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
1303
1304 return i;
1305}
1306
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001307#define DEBUG_BUFMAX 4096
1308static char debug_buffer[DEBUG_BUFMAX];
1309
1310static ssize_t debug_read(struct file *file, char __user *buf,
1311 size_t count, loff_t *ppos)
1312{
1313 int (*fill)(char *buf, int max) = file->private_data;
1314 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1315 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1316}
1317
Eric Holmberg878923a2012-01-10 14:28:19 -07001318static ssize_t debug_read_multiple(struct file *file, char __user *buff,
1319 size_t count, loff_t *ppos)
1320{
1321 int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
1322 char *buffer;
1323 int bsize;
1324
1325 buffer = kmalloc(count, GFP_KERNEL);
1326 if (!buffer)
1327 return -ENOMEM;
1328
1329 bsize = util_func(buffer, count, ppos);
1330
1331 if (bsize >= 0) {
1332 if (copy_to_user(buff, buffer, bsize)) {
1333 kfree(buffer);
1334 return -EFAULT;
1335 }
1336 *ppos += bsize;
1337 }
1338 kfree(buffer);
1339 return bsize;
1340}
1341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001342static int debug_open(struct inode *inode, struct file *file)
1343{
1344 file->private_data = inode->i_private;
1345 return 0;
1346}
1347
1348
1349static const struct file_operations debug_ops = {
1350 .read = debug_read,
1351 .open = debug_open,
1352};
1353
Eric Holmberg878923a2012-01-10 14:28:19 -07001354static const struct file_operations debug_ops_multiple = {
1355 .read = debug_read_multiple,
1356 .open = debug_open,
1357};
1358
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001359static void debug_create(const char *name, mode_t mode,
1360 struct dentry *dent,
1361 int (*fill)(char *buf, int max))
1362{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001363 struct dentry *file;
1364
1365 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1366 if (IS_ERR(file))
1367 pr_err("%s: debugfs create failed %d\n", __func__,
1368 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001369}
1370
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001371static void debug_create_multiple(const char *name, mode_t mode,
1372 struct dentry *dent,
1373 int (*fill)(char *buf, int max, loff_t *ppos))
1374{
1375 struct dentry *file;
1376
1377 file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
1378 if (IS_ERR(file))
1379 pr_err("%s: debugfs create failed %d\n", __func__,
1380 (int)PTR_ERR(file));
1381}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001382#endif
1383
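/* Broadcast an event (e.g. UL connected/disconnected) to every open channel
 * and to all externally registered notify functions. */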
Jeff Hugod98b1082011-10-24 10:30:23 -06001384static void notify_all(int event, unsigned long data)
1385{
1386 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001387 struct list_head *temp;
1388 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001389
1390 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001391 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001392 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001393 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1394 __func__, i, event, data);
1395 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001396 }
Jeff Hugocb798022012-04-09 14:55:40 -06001397
1398 __list_for_each(temp, &bam_other_notify_funcs) {
1399 func = container_of(temp, struct outside_notify_func,
1400 list_node);
1401 func->notify(func->priv, event, data);
1402 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001403}
1404
1405static void kickoff_ul_wakeup_func(struct work_struct *work)
1406{
1407 read_lock(&ul_wakeup_lock);
1408 if (!bam_is_connected) {
1409 read_unlock(&ul_wakeup_lock);
1410 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001411 if (unlikely(in_global_reset == 1))
1412 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001413 read_lock(&ul_wakeup_lock);
1414 ul_packet_written = 1;
1415 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1416 }
1417 read_unlock(&ul_wakeup_lock);
1418}
1419
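/*
 * Marks uplink activity and queues a wakeup if the uplink is down.
 *
 * @returns 1 if the uplink is already connected, 0 if a wakeup was queued
 */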
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001420int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001421{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001422 int is_connected;
1423
1424 read_lock(&ul_wakeup_lock);
1425 ul_packet_written = 1;
1426 is_connected = bam_is_connected;
1427 if (!is_connected)
1428 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1429 read_unlock(&ul_wakeup_lock);
1430
1431 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001432}
1433
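/*
 * Applies or removes the apps processor's vote for A2 power by setting or
 * clearing the SMSM_A2_POWER_CONTROL bit in the shared state.
 */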
Eric Holmberg878923a2012-01-10 14:28:19 -07001434static void power_vote(int vote)
1435{
1436 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1437 bam_dmux_uplink_vote, vote);
1438
1439 if (bam_dmux_uplink_vote == vote)
1440 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1441
1442 bam_dmux_uplink_vote = vote;
1443 if (vote)
1444 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1445 else
1446 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1447}
1448
Eric Holmberg454d9da2012-01-12 09:37:14 -07001449/*
1450 * @note: Must be called with ul_wakeup_lock locked.
1451 */
1452static inline void ul_powerdown(void)
1453{
1454 bam_dmux_log("%s: powerdown\n", __func__);
1455 verify_tx_queue_is_empty(__func__);
1456
1457 if (a2_pc_disabled) {
1458 wait_for_dfab = 1;
1459 INIT_COMPLETION(dfab_unvote_completion);
1460 release_wakelock();
1461 } else {
1462 wait_for_ack = 1;
1463 INIT_COMPLETION(ul_wakeup_ack_completion);
1464 power_vote(0);
1465 }
1466 bam_is_connected = 0;
1467 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1468}
1469
1470static inline void ul_powerdown_finish(void)
1471{
1472 if (a2_pc_disabled && wait_for_dfab) {
1473 unvote_dfab();
1474 complete_all(&dfab_unvote_completion);
1475 wait_for_dfab = 0;
1476 }
1477}
1478
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001479/*
1480 * Votes for UL power and returns current power state.
1481 *
1482 * @returns true if currently connected
1483 */
1484int msm_bam_dmux_ul_power_vote(void)
1485{
1486 int is_connected;
1487
1488 read_lock(&ul_wakeup_lock);
1489 atomic_inc(&ul_ondemand_vote);
1490 is_connected = bam_is_connected;
1491 if (!is_connected)
1492 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1493 read_unlock(&ul_wakeup_lock);
1494
1495 return is_connected;
1496}
1497
1498/*
1499 * Unvotes for UL power.
1500 *
1501 * @returns true if vote count is 0 (UL shutdown possible)
1502 */
1503int msm_bam_dmux_ul_power_unvote(void)
1504{
1505 int vote;
1506
1507 read_lock(&ul_wakeup_lock);
1508 vote = atomic_dec_return(&ul_ondemand_vote);
 1509	if (unlikely(vote < 0))
1510 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1511 read_unlock(&ul_wakeup_lock);
1512
1513 return vote == 0;
1514}
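
/*
 * Illustrative sketch only, not part of the driver: how an on-demand client
 * might bracket a transmit with the vote/unvote calls above so the uplink
 * stays powered for the duration of the write.  The wrapper name and channel
 * id are hypothetical; msm_bam_dmux_write() is assumed to be the driver's
 * normal transmit entry point.
 */
#if 0
static int example_powered_write(uint32_t id, struct sk_buff *skb)
{
	int rc;

	/* take an on-demand vote; 0 means a wakeup was queued asynchronously */
	msm_bam_dmux_ul_power_vote();

	rc = msm_bam_dmux_write(id, skb);

	/* drop the vote; the uplink may power down after the inactivity timer */
	msm_bam_dmux_ul_power_unvote();

	return rc;
}
#endif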
1515
Jeff Hugocb798022012-04-09 14:55:40 -06001516int msm_bam_dmux_reg_notify(void *priv,
1517 void (*notify)(void *priv, int event_type,
1518 unsigned long data))
1519{
1520 struct outside_notify_func *func;
1521
1522 if (!notify)
1523 return -EINVAL;
1524
1525 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1526 if (!func)
1527 return -ENOMEM;
1528
1529 func->notify = notify;
1530 func->priv = priv;
1531 list_add(&func->list_node, &bam_other_notify_funcs);
1532
1533 return 0;
1534}
1535
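/*
 * Delayed work that runs UL_TIMEOUT_DELAY after the uplink was last used.
 * If traffic was seen (or on-demand votes are still held) it re-arms
 * itself; otherwise it powers the uplink down.  Packets stalled in
 * bam_tx_pool are logged and treated as activity.
 */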
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001536static void ul_timeout(struct work_struct *work)
1537{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001538 unsigned long flags;
1539 int ret;
1540
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001541 if (in_global_reset)
1542 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001543 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1544 if (!ret) { /* failed to grab lock, reschedule and bail */
1545 schedule_delayed_work(&ul_timeout_work,
1546 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1547 return;
1548 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001549 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001550 if (!ul_packet_written) {
1551 spin_lock(&bam_tx_pool_spinlock);
1552 if (!list_empty(&bam_tx_pool)) {
1553 struct tx_pkt_info *info;
1554
1555 info = list_first_entry(&bam_tx_pool,
1556 struct tx_pkt_info, list_node);
1557 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1558 __func__, info->ts_sec, info->ts_nsec);
1559 DBG_INC_TX_STALL_CNT();
1560 ul_packet_written = 1;
1561 }
1562 spin_unlock(&bam_tx_pool_spinlock);
1563 }
1564
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001565 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1566 bam_dmux_log("%s: pkt written %d\n",
1567 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001568 ul_packet_written = 0;
1569 schedule_delayed_work(&ul_timeout_work,
1570 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001571 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001572 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001573 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001574 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001575 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001576 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001577}
Jeff Hugo4838f412012-01-20 11:19:37 -07001578
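/*
 * Called when the modem fails to ack a power request in time: puts the
 * driver into global reset and logs an error if subsystem restart is not
 * enabled.  Always returns 1 so callers abort the wakeup sequence.
 */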
1579static int ssrestart_check(void)
1580{
Eric Holmberg90285e22012-02-22 12:33:05 -07001581 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1582 in_global_reset = 1;
1583 if (get_restart_level() <= RESET_SOC)
1584 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1585 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001586}
1587
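/*
 * Brings the uplink out of low power.  With A2 power collapse disabled it
 * only re-votes for clocks; otherwise it waits for any outstanding
 * powerdown ack, votes for A2 power over SMSM, and waits (with timeouts
 * backed by ssrestart_check()) for the wakeup ack and BAM connection.
 */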
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001588static void ul_wakeup(void)
1589{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001590 int ret;
1591
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001592 mutex_lock(&wakeup_lock);
1593 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001594 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001595 mutex_unlock(&wakeup_lock);
1596 return;
1597 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001598
Eric Holmberg006057d2012-01-11 10:10:42 -07001599 if (a2_pc_disabled) {
1600 /*
1601 * don't grab the wakelock the first time because it is
1602 * already grabbed when a2 powers on
1603 */
Jeff Hugo583a6da2012-02-03 11:37:30 -07001604 if (likely(a2_pc_disabled_wakelock_skipped))
Eric Holmberg006057d2012-01-11 10:10:42 -07001605 grab_wakelock();
1606 else
Jeff Hugo583a6da2012-02-03 11:37:30 -07001607 a2_pc_disabled_wakelock_skipped = 1;
Eric Holmberg006057d2012-01-11 10:10:42 -07001608 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001609 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001610 &dfab_unvote_completion, HZ);
1611 BUG_ON(ret == 0);
1612 }
1613 vote_dfab();
1614 schedule_delayed_work(&ul_timeout_work,
1615 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1616 bam_is_connected = 1;
1617 mutex_unlock(&wakeup_lock);
1618 return;
1619 }
1620
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001621 /*
 1622	 * must wait for the previous power down request to have been acked;
 1623	 * chances are the ack already came in and this will simply fall
 1624	 * through instead of waiting
1625 */
1626 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001627 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001628 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001629 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001630 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001631 if (unlikely(ret == 0) && ssrestart_check()) {
1632 mutex_unlock(&wakeup_lock);
1633 bam_dmux_log("%s timeout previous ack\n", __func__);
1634 return;
1635 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001636 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001637 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001638 power_vote(1);
1639 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001640 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001641 if (unlikely(ret == 0) && ssrestart_check()) {
1642 mutex_unlock(&wakeup_lock);
1643 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1644 return;
1645 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001646 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001647 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001648 if (unlikely(ret == 0) && ssrestart_check()) {
1649 mutex_unlock(&wakeup_lock);
1650 bam_dmux_log("%s timeout power on\n", __func__);
1651 return;
1652 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001653
1654 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001655 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001656 schedule_delayed_work(&ul_timeout_work,
1657 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1658 mutex_unlock(&wakeup_lock);
1659}
1660
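/*
 * Re-establishes the BAM connection after the A2 powers back up: resets
 * the device, reconnects both pipes, re-registers the pipe events, acks
 * the modem and replenishes the RX queue.
 */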
1661static void reconnect_to_bam(void)
1662{
1663 int i;
1664
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001665 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001666 vote_dfab();
1667 i = sps_device_reset(a2_device_handle);
1668 if (i)
1669 pr_err("%s: device reset failed rc = %d\n", __func__, i);
1670 i = sps_connect(bam_tx_pipe, &tx_connection);
1671 if (i)
1672 pr_err("%s: tx connection failed rc = %d\n", __func__, i);
1673 i = sps_connect(bam_rx_pipe, &rx_connection);
1674 if (i)
1675 pr_err("%s: rx connection failed rc = %d\n", __func__, i);
1676 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1677 if (i)
1678 pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
1679 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1680 if (i)
1681 pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001682
1683 bam_connection_is_active = 1;
1684
1685 if (polling_mode)
1686 rx_switch_to_interrupt_mode();
1687
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001688 toggle_apps_ack();
1689 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06001690 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001691}
1692
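/*
 * Tears down the BAM connection when the A2 drops its power vote: forces
 * an uplink powerdown if needed, disconnects both pipes, clears the
 * descriptor FIFOs and frees any skbs still queued in the RX pool.
 */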
1693static void disconnect_to_bam(void)
1694{
1695 struct list_head *node;
1696 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001697 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001698
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001699 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001700
1701 /* handle disconnect during active UL */
1702 write_lock_irqsave(&ul_wakeup_lock, flags);
1703 if (bam_is_connected) {
1704 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1705 ul_powerdown();
1706 }
1707 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1708 ul_powerdown_finish();
1709
1710 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001711 INIT_COMPLETION(bam_connection_completion);
1712 sps_disconnect(bam_tx_pipe);
1713 sps_disconnect(bam_rx_pipe);
1714 unvote_dfab();
1715 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1716 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001717
1718 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001719 while (!list_empty(&bam_rx_pool)) {
1720 node = bam_rx_pool.next;
1721 list_del(node);
1722 info = container_of(node, struct rx_pkt_info, list_node);
1723 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1724 DMA_FROM_DEVICE);
1725 dev_kfree_skb_any(info->skb);
1726 kfree(info);
1727 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001728 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001729 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001730
Jeff Hugo0b13a352012-03-17 23:18:30 -06001731 if (disconnect_ack)
1732 toggle_apps_ack();
1733
Eric Holmberg878923a2012-01-10 14:28:19 -07001734 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001735}
1736
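/* Enables the dfab and xo clocks needed while the BAM is in use. */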
1737static void vote_dfab(void)
1738{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001739 int rc;
1740
Eric Holmberg006057d2012-01-11 10:10:42 -07001741 bam_dmux_log("%s\n", __func__);
1742 mutex_lock(&dfab_status_lock);
1743 if (dfab_is_on) {
1744 bam_dmux_log("%s: dfab is already on\n", __func__);
1745 mutex_unlock(&dfab_status_lock);
1746 return;
1747 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001748 rc = clk_prepare_enable(dfab_clk);
Jeff Hugoca0caa82011-12-05 16:05:23 -07001749 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001750 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001751 rc = clk_prepare_enable(xo_clk);
1752 if (rc)
1753 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
Eric Holmberg006057d2012-01-11 10:10:42 -07001754 dfab_is_on = 1;
1755 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001756}
1757
1758static void unvote_dfab(void)
1759{
Eric Holmberg006057d2012-01-11 10:10:42 -07001760 bam_dmux_log("%s\n", __func__);
1761 mutex_lock(&dfab_status_lock);
1762 if (!dfab_is_on) {
1763 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1764 dump_stack();
1765 mutex_unlock(&dfab_status_lock);
1766 return;
1767 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001768 clk_disable_unprepare(dfab_clk);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001769 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001770 dfab_is_on = 0;
1771 mutex_unlock(&dfab_status_lock);
1772}
1773
1774/* reference counting wrapper around wakelock */
1775static void grab_wakelock(void)
1776{
1777 unsigned long flags;
1778
1779 spin_lock_irqsave(&wakelock_reference_lock, flags);
1780 bam_dmux_log("%s: ref count = %d\n", __func__,
1781 wakelock_reference_count);
1782 if (wakelock_reference_count == 0)
1783 wake_lock(&bam_wakelock);
1784 ++wakelock_reference_count;
1785 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1786}
1787
1788static void release_wakelock(void)
1789{
1790 unsigned long flags;
1791
1792 spin_lock_irqsave(&wakelock_reference_lock, flags);
1793 if (wakelock_reference_count == 0) {
1794 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1795 dump_stack();
1796 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1797 return;
1798 }
1799 bam_dmux_log("%s: ref count = %d\n", __func__,
1800 wakelock_reference_count);
1801 --wakelock_reference_count;
1802 if (wakelock_reference_count == 0)
1803 wake_unlock(&bam_wakelock);
1804 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001805}
1806
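/*
 * Subsystem restart hook.  After the modem shuts down this forces an
 * uplink powerdown, clears the power vote, resets per-channel state and
 * frees every packet still pending in the TX pool.
 */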
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001807static int restart_notifier_cb(struct notifier_block *this,
1808 unsigned long code,
1809 void *data)
1810{
1811 int i;
1812 struct list_head *node;
1813 struct tx_pkt_info *info;
1814 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001815 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001816
1817 if (code != SUBSYS_AFTER_SHUTDOWN)
1818 return NOTIFY_DONE;
1819
Eric Holmberg878923a2012-01-10 14:28:19 -07001820 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001821 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001822
1823 /* Handle uplink Powerdown */
1824 write_lock_irqsave(&ul_wakeup_lock, flags);
1825 if (bam_is_connected) {
1826 ul_powerdown();
1827 wait_for_ack = 0;
1828 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001829 /*
 1830	 * if the modem crashes during ul_wakeup(), power_vote is 1 and needs to
 1831	 * be reset to 0; harmless if the bam_is_connected check above passes
1832 */
1833 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001834 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1835 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001836 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001837 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugo0b13a352012-03-17 23:18:30 -06001838 disconnect_ack = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001839
1840 /* Cleanup Channel States */
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001841 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1842 temp_remote_status = bam_ch_is_remote_open(i);
1843 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001844 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001845 if (bam_ch_is_local_open(i))
1846 bam_ch[i].status |= BAM_CH_IN_RESET;
1847 if (temp_remote_status) {
1848 platform_device_unregister(bam_ch[i].pdev);
1849 bam_ch[i].pdev = platform_device_alloc(
1850 bam_ch[i].name, 2);
1851 }
1852 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001853
1854 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001855 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001856 while (!list_empty(&bam_tx_pool)) {
1857 node = bam_tx_pool.next;
1858 list_del(node);
1859 info = container_of(node, struct tx_pkt_info,
1860 list_node);
1861 if (!info->is_cmd) {
1862 dma_unmap_single(NULL, info->dma_address,
1863 info->skb->len,
1864 DMA_TO_DEVICE);
1865 dev_kfree_skb_any(info->skb);
1866 } else {
1867 dma_unmap_single(NULL, info->dma_address,
1868 info->len,
1869 DMA_TO_DEVICE);
1870 kfree(info->skb);
1871 }
1872 kfree(info);
1873 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001874 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001875
Eric Holmberg878923a2012-01-10 14:28:19 -07001876 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001877 return NOTIFY_DONE;
1878}
1879
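/*
 * Full BAM/SPS bring-up: maps the A2 BAM, registers it, allocates and
 * connects the TX (dest pipe 4) and RX (src pipe 5) endpoints with 2k
 * descriptor FIFOs, registers EOT events on both pipes and starts RX
 * queueing.
 */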
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001880static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881{
1882 u32 h;
1883 dma_addr_t dma_addr;
1884 int ret;
1885 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001886 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001888 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 /* init BAM */
1890 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1891 if (!a2_virt_addr) {
1892 pr_err("%s: ioremap failed\n", __func__);
1893 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001894 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001895 }
1896 a2_props.phys_addr = A2_PHYS_BASE;
1897 a2_props.virt_addr = a2_virt_addr;
1898 a2_props.virt_size = A2_PHYS_SIZE;
1899 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001900 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901 a2_props.num_pipes = A2_NUM_PIPES;
1902 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001903 if (cpu_is_msm9615())
1904 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001905 /* need to free on tear down */
1906 ret = sps_register_bam_device(&a2_props, &h);
1907 if (ret < 0) {
1908 pr_err("%s: register bam error %d\n", __func__, ret);
1909 goto register_bam_failed;
1910 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001911 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001912
1913 bam_tx_pipe = sps_alloc_endpoint();
1914 if (bam_tx_pipe == NULL) {
1915 pr_err("%s: tx alloc endpoint failed\n", __func__);
1916 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001917 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001918 }
1919 ret = sps_get_config(bam_tx_pipe, &tx_connection);
1920 if (ret) {
1921 pr_err("%s: tx get config failed %d\n", __func__, ret);
1922 goto tx_get_config_failed;
1923 }
1924
1925 tx_connection.source = SPS_DEV_HANDLE_MEM;
1926 tx_connection.src_pipe_index = 0;
1927 tx_connection.destination = h;
1928 tx_connection.dest_pipe_index = 4;
1929 tx_connection.mode = SPS_MODE_DEST;
1930 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
1931 tx_desc_mem_buf.size = 0x800; /* 2k */
1932 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
1933 &dma_addr, 0);
1934 if (tx_desc_mem_buf.base == NULL) {
1935 pr_err("%s: tx memory alloc failed\n", __func__);
1936 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001937 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938 }
1939 tx_desc_mem_buf.phys_base = dma_addr;
1940 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
1941 tx_connection.desc = tx_desc_mem_buf;
1942 tx_connection.event_thresh = 0x10;
1943
1944 ret = sps_connect(bam_tx_pipe, &tx_connection);
1945 if (ret < 0) {
1946 pr_err("%s: tx connect error %d\n", __func__, ret);
1947 goto tx_connect_failed;
1948 }
1949
1950 bam_rx_pipe = sps_alloc_endpoint();
1951 if (bam_rx_pipe == NULL) {
1952 pr_err("%s: rx alloc endpoint failed\n", __func__);
1953 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001954 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001955 }
1956 ret = sps_get_config(bam_rx_pipe, &rx_connection);
1957 if (ret) {
1958 pr_err("%s: rx get config failed %d\n", __func__, ret);
1959 goto rx_get_config_failed;
1960 }
1961
1962 rx_connection.source = h;
1963 rx_connection.src_pipe_index = 5;
1964 rx_connection.destination = SPS_DEV_HANDLE_MEM;
1965 rx_connection.dest_pipe_index = 1;
1966 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06001967 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
1968 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969 rx_desc_mem_buf.size = 0x800; /* 2k */
1970 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
1971 &dma_addr, 0);
1972 if (rx_desc_mem_buf.base == NULL) {
1973 pr_err("%s: rx memory alloc failed\n", __func__);
1974 ret = -ENOMEM;
1975 goto rx_mem_failed;
1976 }
1977 rx_desc_mem_buf.phys_base = dma_addr;
1978 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
1979 rx_connection.desc = rx_desc_mem_buf;
1980 rx_connection.event_thresh = 0x10;
1981
1982 ret = sps_connect(bam_rx_pipe, &rx_connection);
1983 if (ret < 0) {
1984 pr_err("%s: rx connect error %d\n", __func__, ret);
1985 goto rx_connect_failed;
1986 }
1987
1988 tx_register_event.options = SPS_O_EOT;
1989 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
1990 tx_register_event.xfer_done = NULL;
1991 tx_register_event.callback = bam_mux_tx_notify;
1992 tx_register_event.user = NULL;
1993 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
1994 if (ret < 0) {
1995 pr_err("%s: tx register event error %d\n", __func__, ret);
1996 goto rx_event_reg_failed;
1997 }
1998
Jeff Hugo33dbc002011-08-25 15:52:53 -06001999 rx_register_event.options = SPS_O_EOT;
2000 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2001 rx_register_event.xfer_done = NULL;
2002 rx_register_event.callback = bam_mux_rx_notify;
2003 rx_register_event.user = NULL;
2004 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2005 if (ret < 0) {
2006 pr_err("%s: tx register event error %d\n", __func__, ret);
2007 goto rx_event_reg_failed;
2008 }
2009
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 bam_mux_initialized = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002011 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002012 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002013 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002014 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002015 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002016
2017rx_event_reg_failed:
2018 sps_disconnect(bam_rx_pipe);
2019rx_connect_failed:
2020 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2021 rx_desc_mem_buf.phys_base);
2022rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023rx_get_config_failed:
2024 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002025rx_alloc_endpoint_failed:
2026 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002027tx_connect_failed:
2028 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2029 tx_desc_mem_buf.phys_base);
2030tx_get_config_failed:
2031 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002032tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002033 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002034 /*
2035 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2036 * same handle below will cause a crash, so skip it if we've freed
2037 * the handle here.
2038 */
2039 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002041 if (!skip_iounmap)
2042 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002043ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002045 return ret;
2046}
2047
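/*
 * Minimal bring-up used when the full bam_init() fails: registers the BAM
 * device without connecting any pipes so the power handshake with the A2
 * can still be acked.
 */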
2048static int bam_init_fallback(void)
2049{
2050 u32 h;
2051 int ret;
2052 void *a2_virt_addr;
2053
2054 unvote_dfab();
2055 /* init BAM */
2056 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
2057 if (!a2_virt_addr) {
2058 pr_err("%s: ioremap failed\n", __func__);
2059 ret = -ENOMEM;
2060 goto ioremap_failed;
2061 }
2062 a2_props.phys_addr = A2_PHYS_BASE;
2063 a2_props.virt_addr = a2_virt_addr;
2064 a2_props.virt_size = A2_PHYS_SIZE;
2065 a2_props.irq = A2_BAM_IRQ;
2066 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2067 a2_props.num_pipes = A2_NUM_PIPES;
2068 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2069 if (cpu_is_msm9615())
2070 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2071 ret = sps_register_bam_device(&a2_props, &h);
2072 if (ret < 0) {
2073 pr_err("%s: register bam error %d\n", __func__, ret);
2074 goto register_bam_failed;
2075 }
2076 a2_device_handle = h;
Jeff Hugo2bec9772012-04-05 12:25:16 -06002077 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002078
2079 return 0;
2080
2081register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002082 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002083ioremap_failed:
2084 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085}
Jeff Hugoade1f842011-08-03 15:53:59 -06002086
Jeff Hugoa670b762012-03-15 15:58:28 -06002087static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002088{
2089 int ret = 0;
2090
2091 ret = bam_init();
2092 if (ret) {
2093 ret = bam_init_fallback();
2094 if (ret)
2095 pr_err("%s: bam init fallback failed: %d",
2096 __func__, ret);
2097 }
2098}
2099
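/*
 * Acks an A2 power control request by toggling the
 * SMSM_A2_POWER_CONTROL_ACK bit; the ack is signalled by a change in the
 * bit rather than its level, so it alternates on every call.
 */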
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002100static void toggle_apps_ack(void)
2101{
2102 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002103
2104 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2105 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002106 smsm_change_state(SMSM_APPS_STATE,
2107 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2108 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2109 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002110 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002111}
2112
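/*
 * SMSM callback for SMSM_A2_POWER_CONTROL changes from the modem: performs
 * the first-time BAM init, or reconnects/disconnects the BAM as the A2
 * powers up and down, holding the wakelock while the A2 keeps its vote.
 */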
Jeff Hugoade1f842011-08-03 15:53:59 -06002113static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2114{
Eric Holmberg878923a2012-01-10 14:28:19 -07002115 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002116 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002117 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2118 new_state);
2119
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002120 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002121 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002122 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002123 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002124 } else if (bam_mux_initialized &&
2125 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002126 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002127 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002128 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002129 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002130 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002131 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002132 if (cpu_is_msm9615())
2133 msm9615_bam_init();
2134 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002135 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002136 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002137 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002138 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002139 }
Jeff Hugoade1f842011-08-03 15:53:59 -06002140
2141}
2142
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002143static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2144 uint32_t new_state)
2145{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002146 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002147 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2148 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002149 complete_all(&ul_wakeup_ack_completion);
2150}
2151
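/*
 * Platform probe: acquires the xo and dfab clocks, creates the RX/TX
 * workqueues and per-channel platform devices, and registers the SMSM
 * power-control and ack callbacks that drive the rest of the bring-up.
 */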
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152static int bam_dmux_probe(struct platform_device *pdev)
2153{
2154 int rc;
2155
2156 DBG("%s probe called\n", __func__);
2157 if (bam_mux_initialized)
2158 return 0;
2159
Stephen Boyd69d35e32012-02-14 15:33:30 -08002160 xo_clk = clk_get(&pdev->dev, "xo");
2161 if (IS_ERR(xo_clk)) {
2162 pr_err("%s: did not get xo clock\n", __func__);
2163 return PTR_ERR(xo_clk);
2164 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002165 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002166 if (IS_ERR(dfab_clk)) {
2167 pr_err("%s: did not get dfab clock\n", __func__);
2168 return -EFAULT;
2169 }
2170
2171 rc = clk_set_rate(dfab_clk, 64000000);
2172 if (rc)
2173 pr_err("%s: unable to set dfab clock rate\n", __func__);
2174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
2176 if (!bam_mux_rx_workqueue)
2177 return -ENOMEM;
2178
2179 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2180 if (!bam_mux_tx_workqueue) {
2181 destroy_workqueue(bam_mux_rx_workqueue);
2182 return -ENOMEM;
2183 }
2184
Jeff Hugo7960abd2011-08-02 15:39:38 -06002185 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002187 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2188 "bam_dmux_ch_%d", rc);
2189 /* bus 2, ie a2 stream 2 */
2190 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2191 if (!bam_ch[rc].pdev) {
2192 pr_err("%s: platform device alloc failed\n", __func__);
2193 destroy_workqueue(bam_mux_rx_workqueue);
2194 destroy_workqueue(bam_mux_tx_workqueue);
2195 return -ENOMEM;
2196 }
2197 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002198
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002199 init_completion(&ul_wakeup_ack_completion);
2200 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002201 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002202 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002203 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002204
Jeff Hugoade1f842011-08-03 15:53:59 -06002205 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2206 bam_dmux_smsm_cb, NULL);
2207
2208 if (rc) {
2209 destroy_workqueue(bam_mux_rx_workqueue);
2210 destroy_workqueue(bam_mux_tx_workqueue);
2211 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2212 return -ENOMEM;
2213 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002214
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002215 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2216 bam_dmux_smsm_ack_cb, NULL);
2217
2218 if (rc) {
2219 destroy_workqueue(bam_mux_rx_workqueue);
2220 destroy_workqueue(bam_mux_tx_workqueue);
2221 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2222 SMSM_A2_POWER_CONTROL,
2223 bam_dmux_smsm_cb, NULL);
2224 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2225 rc);
2226 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2227 platform_device_put(bam_ch[rc].pdev);
2228 return -ENOMEM;
2229 }
2230
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002231 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2232 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2233
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002234 return 0;
2235}
2236
2237static struct platform_driver bam_dmux_driver = {
2238 .probe = bam_dmux_probe,
2239 .driver = {
2240 .name = "BAM_RMNT",
2241 .owner = THIS_MODULE,
2242 },
2243};
2244
2245static int __init bam_dmux_init(void)
2246{
Eric Holmberg878923a2012-01-10 14:28:19 -07002247 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002248#ifdef CONFIG_DEBUG_FS
2249 struct dentry *dent;
2250
2251 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002252 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002253 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002254 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2255 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002256 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002257 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002258#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002259 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2260 if (ret) {
2261 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2262 bam_dmux_state_logging_disabled = 1;
2263 }
2264
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002265 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002266 return platform_driver_register(&bam_dmux_driver);
2267}
2268
Jeff Hugoade1f842011-08-03 15:53:59 -06002269late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002270MODULE_DESCRIPTION("MSM BAM DMUX");
2271MODULE_LICENSE("GPL v2");