/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA		0
#define BAM_MUX_HDR_CMD_OPEN		1
#define BAM_MUX_HDR_CMD_CLOSE		2
#define BAM_MUX_HDR_CMD_STATUS		3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC	4

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
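
/*
 * The watermarks count in-flight TX packets per channel (num_tx_pkts):
 * a channel reports full once it reaches HIGH_WATERMARK and low again
 * at or below LOW_WATERMARK, giving clients hysteresis for flow control
 * (see msm_bam_dmux_is_ch_full()/msm_bam_dmux_is_ch_low() below).
 */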

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
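
/*
 * On-the-wire layout of struct bam_mux_hdr, assuming the platform's
 * little-endian byte order and no implicit padding (all fields are
 * naturally aligned, 8 bytes total). The header is followed by pkt_len
 * payload bytes and pad_len padding bytes:
 *
 *	bytes 0-1: magic_num (BAM_MUX_HDR_MAGIC_NO, 0x33fc)
 *	byte  2:   reserved
 *	byte  3:   cmd (BAM_MUX_HDR_CMD_*)
 *	byte  4:   pad_len
 *	byte  5:   ch_id
 *	bytes 6-7: pkt_len
 */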

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY	1000	/* in ms */
#define ENABLE_DISCONNECT_ACK	0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * The complete size of the message is limited to LOG_MESSAGE_MAX_SIZE.
 */
297static void bam_dmux_log(const char *fmt, ...)
298{
299 char buff[LOG_MESSAGE_MAX_SIZE];
300 unsigned long flags;
301 va_list arg_list;
302 unsigned long long t_now;
303 unsigned long nanosec_rem;
304 int len = 0;
305
306 if (bam_dmux_state_logging_disabled)
307 return;
308
309 t_now = sched_clock();
310 nanosec_rem = do_div(t_now, 1000000000U);
311
312 /*
313 * States
Eric Holmberg006057d2012-01-11 10:10:42 -0700314 * D: 1 = Power collapse disabled
Eric Holmberg878923a2012-01-10 14:28:19 -0700315 * R: 1 = in global reset
316 * P: 1 = BAM is powered up
317 * A: 1 = BAM initialized and ready for data
318 *
319 * V: 1 = Uplink vote for power
320 * U: 1 = Uplink active
321 * W: 1 = Uplink Wait-for-ack
322 * A: 1 = Uplink ACK received
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700323 * #: >=1 On-demand uplink vote
Jeff Hugo0b13a352012-03-17 23:18:30 -0600324 * D: 1 = Disconnect ACK active
Eric Holmberg878923a2012-01-10 14:28:19 -0700325 */
326 len += scnprintf(buff, sizeof(buff),
Jeff Hugo0b13a352012-03-17 23:18:30 -0600327 "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
Eric Holmberg878923a2012-01-10 14:28:19 -0700328 (unsigned)t_now, nanosec_rem,
Eric Holmberg006057d2012-01-11 10:10:42 -0700329 a2_pc_disabled ? 'D' : 'd',
Eric Holmberg878923a2012-01-10 14:28:19 -0700330 in_global_reset ? 'R' : 'r',
331 bam_dmux_power_state ? 'P' : 'p',
332 bam_connection_is_active ? 'A' : 'a',
333 bam_dmux_uplink_vote ? 'V' : 'v',
334 bam_is_connected ? 'U' : 'u',
335 wait_for_ack ? 'W' : 'w',
Eric Holmbergbc9f21c2012-01-18 11:33:33 -0700336 ul_wakeup_ack_completion.done ? 'A' : 'a',
Jeff Hugo0b13a352012-03-17 23:18:30 -0600337 atomic_read(&ul_ondemand_vote),
338 disconnect_ack ? 'D' : 'd'
Eric Holmberg878923a2012-01-10 14:28:19 -0700339 );
340
341 va_start(arg_list, fmt);
342 len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
343 va_end(arg_list);
344 memset(buff + len, 0x0, sizeof(buff) - len);
345
346 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
347 if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
348 char junk[LOG_MESSAGE_MAX_SIZE];
349 int ret;
350
351 ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
352 if (ret != LOG_MESSAGE_MAX_SIZE) {
353 pr_err("%s: unable to empty log %d\n", __func__, ret);
354 spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
355 flags);
356 return;
357 }
358 }
359 kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
360 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
361}
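
/*
 * Worked example (values are illustrative only): a line such as
 *	<DMUX> 17.000000042 drPA VuWa1D power_vote: curr=0, vote=1
 * decodes, per the legend above, as: power collapse enabled (d), not
 * in global reset (r), BAM powered up (P), BAM ready for data (A);
 * uplink vote held (V), uplink not yet active (u), waiting for ack (W),
 * ack not yet received (a), one on-demand vote (1), disconnect ACK
 * active (D).
 */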

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
		if (!info) {
			pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
		if (info->skb == NULL) {
			DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);

		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

		if (ret) {
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);
			goto fail_transfer;
		}
	}
	return;

fail_transfer:
	mutex_lock(&bam_rx_pool_mutexlock);
	list_del(&info->list_node);
	--bam_rx_pool_len;
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
			DMA_FROM_DEVICE);

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0) {
		DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
		in_global_reset = 1;
	}
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	queue_rx();
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
}

static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
				" reserved %d cmd %d"
				" pad %d ch %d len %d\n", __func__,
				rx_hdr->magic_num, rx_hdr->reserved,
				rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
				" reserved %d cmd %d"
				" pad %d ch %d len %d\n", __func__,
				rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
				rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
					__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		dev_kfree_skb_any(rx_skb);
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
				" reserved %d cmd %d pad %d ch %d len %d\n",
				__func__, rx_hdr->magic_num, rx_hdr->reserved,
				rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
				__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
					&errant_pkt->list_node,
					errant_pkt->ts_sec,
					errant_pkt->ts_nsec);
		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy may be more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
				__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}
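
/*
 * Padding arithmetic example for msm_bam_dmux_write() (illustrative):
 * a 13-byte payload grows to 8 (header) + 13 = 21 bytes after
 * skb_push(); 21 & 0x3 == 1, so skb_put() adds 4 - 1 = 3 pad bytes.
 * The header then carries pkt_len = 13 and pad_len = 24 - (8 + 13) = 3,
 * and the receive side trims the skb back to pkt_len in
 * bam_mux_process_data().
 */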

int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}
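
/*
 * Minimal client sketch (hypothetical channel id and callback; error
 * handling elided):
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_RECEIVE)
 *			dev_kfree_skb_any((struct sk_buff *)data);
 *	}
 *
 *	rc = msm_bam_dmux_open(my_ch_id, NULL, my_notify);
 *	if (!rc)
 *		rc = msm_bam_dmux_write(my_ch_id, skb);
 *
 * The notify callback also receives BAM_DMUX_WRITE_DONE with the sent
 * skb, plus BAM_DMUX_UL_CONNECTED/BAM_DMUX_UL_DISCONNECTED power
 * events; a real client would process received skbs before freeing.
 */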

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}
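
/*
 * Flow-control sketch using the watermark queries above (hypothetical
 * caller; assumes the channel was opened with msm_bam_dmux_open()):
 *
 *	if (msm_bam_dmux_is_ch_full(my_ch_id) > 0)
 *		stop_tx_queue();
 *	...
 *	if (msm_bam_dmux_is_ch_low(my_ch_id) > 0)
 *		resume_tx_queue();
 *
 * Note that calling either query sets use_wm, after which
 * msm_bam_dmux_write() starts enforcing HIGH_WATERMARK by returning
 * -EAGAIN.
 */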

static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
				list_node);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		if (info->dma_address != iov.addr)
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)info->dma_address, (void *)iov.addr);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}
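
/*
 * The drain loop above closes a race on the polling-to-interrupt
 * transition: descriptors completed between the last poll and the
 * sps_set_config() that re-enables EOT interrupts would otherwise
 * raise no further event, so they are consumed synchronously before
 * returning to interrupt-driven operation.
 */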

static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			--bam_rx_pool_len;
			list_del(&info->list_node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}
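
/*
 * Polling lifecycle sketch: bam_mux_rx_notify() (defined below) sees an
 * EOT interrupt, masks pipe interrupts (SPS_O_POLL), and queues
 * rx_timer_work on core 0; rx_timer_work_func() then polls
 * sps_get_iovec() roughly every millisecond (POLLING_MIN_SLEEP..
 * POLLING_MAX_SLEEP) until POLLING_INACTIVITY empty cycles pass, at
 * which point rx_switch_to_interrupt_mode() re-arms interrupts.
 */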

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

1200
1201static int debug_tbl(char *buf, int max)
1202{
1203 int i = 0;
1204 int j;
1205
1206 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1207 i += scnprintf(buf + i, max - i,
1208 "ch%02d local open=%s remote open=%s\n",
1209 j, bam_ch_is_local_open(j) ? "Y" : "N",
1210 bam_ch_is_remote_open(j) ? "Y" : "N");
1211 }
1212
1213 return i;
1214}
1215
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001216static int debug_ul_pkt_cnt(char *buf, int max)
1217{
1218 struct list_head *p;
1219 unsigned long flags;
1220 int n = 0;
1221
1222 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1223 __list_for_each(p, &bam_tx_pool) {
1224 ++n;
1225 }
1226 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1227
1228 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1229}
1230
1231static int debug_stats(char *buf, int max)
1232{
1233 int i = 0;
1234
1235 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001236 "skb read cnt: %u\n"
1237 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001238 "skb copy cnt: %u\n"
1239 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001240 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001241 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001242 "rx queue len: %d\n"
1243 "a2 ack out cnt: %d\n"
1244 "a2 ack in cnt: %d\n"
1245 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001246 bam_dmux_read_cnt,
1247 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001248 bam_dmux_write_cpy_cnt,
1249 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001250 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001251 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001252 bam_rx_pool_len,
1253 atomic_read(&bam_dmux_ack_out_cnt),
1254 atomic_read(&bam_dmux_ack_in_cnt),
1255 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001256 );
1257
1258 return i;
1259}
1260
static int debug_log(char *buff, int max, loff_t *ppos)
{
	unsigned long flags;
	int i = 0;

	if (bam_dmux_state_logging_disabled) {
		i += scnprintf(buff + i, max - i, "Logging disabled\n");
		return i;
	}

	if (*ppos == 0) {
		i += scnprintf(buff + i, max - i,
			"<DMUX> timestamp FLAGS [Message]\n"
			"FLAGS:\n"
			"\tD: 1 = Power collapse disabled\n"
			"\tR: 1 = in global reset\n"
			"\tP: 1 = BAM is powered up\n"
			"\tA: 1 = BAM initialized and ready for data\n"
			"\n"
			"\tV: 1 = Uplink vote for power\n"
			"\tU: 1 = Uplink active\n"
			"\tW: 1 = Uplink Wait-for-ack\n"
			"\tA: 1 = Uplink ACK received\n"
			"\t#: >=1 On-demand uplink vote\n"
			"\tD: 1 = Disconnect ACK active\n"
			);
		buff += i;
	}

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	while (kfifo_len(&bam_dmux_state_log)
			&& (i + LOG_MESSAGE_MAX_SIZE) < max) {
		int k_len;
		k_len = kfifo_out(&bam_dmux_state_log,
				buff, LOG_MESSAGE_MAX_SIZE);
		if (k_len != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: retrieve failure %d\n", __func__, k_len);
			break;
		}

		/* keep non-null portion of string and add line break */
		k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
		buff += k_len;
		i += k_len;
		if (k_len && *(buff - 1) != '\n') {
			*buff++ = '\n';
			++i;
		}
	}
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static ssize_t debug_read_multiple(struct file *file, char __user *buff,
				   size_t count, loff_t *ppos)
{
	int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
	char *buffer;
	int bsize;

	buffer = kmalloc(count, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	bsize = util_func(buffer, count, ppos);

	if (bsize >= 0) {
		if (copy_to_user(buff, buffer, bsize)) {
			kfree(buffer);
			return -EFAULT;
		}
		*ppos += bsize;
	}
	kfree(buffer);
	return bsize;
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static const struct file_operations debug_ops_multiple = {
	.read = debug_read_multiple,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}

static void debug_create_multiple(const char *name, mode_t mode,
				  struct dentry *dent,
				  int (*fill)(char *buf, int max, loff_t *ppos))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill,
					&debug_ops_multiple);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}
#endif

Jeff Hugod98b1082011-10-24 10:30:23 -06001392static void notify_all(int event, unsigned long data)
1393{
1394 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001395 struct list_head *temp;
1396 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001397
1398 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001399 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001400 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001401 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1402 __func__, i, event, data);
1403 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001404 }
Jeff Hugocb798022012-04-09 14:55:40 -06001405
1406 __list_for_each(temp, &bam_other_notify_funcs) {
1407 func = container_of(temp, struct outside_notify_func,
1408 list_node);
1409 func->notify(func->priv, event, data);
1410 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001411}
1412
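/*
 * Worker that performs the potentially blocking uplink wakeup for
 * clients; it rechecks bam_is_connected under ul_wakeup_lock because
 * the connection may have come up before this work got to run.
 */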
1413static void kickoff_ul_wakeup_func(struct work_struct *work)
1414{
1415 read_lock(&ul_wakeup_lock);
1416 if (!bam_is_connected) {
1417 read_unlock(&ul_wakeup_lock);
1418 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001419 if (unlikely(in_global_reset == 1))
1420 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001421 read_lock(&ul_wakeup_lock);
1422 ul_packet_written = 1;
1423 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1424 }
1425 read_unlock(&ul_wakeup_lock);
1426}
1427
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001428int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001429{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001430 int is_connected;
1431
1432 read_lock(&ul_wakeup_lock);
1433 ul_packet_written = 1;
1434 is_connected = bam_is_connected;
1435 if (!is_connected)
1436 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1437 read_unlock(&ul_wakeup_lock);
1438
1439 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001440}
1441
Eric Holmberg878923a2012-01-10 14:28:19 -07001442static void power_vote(int vote)
1443{
1444 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1445 bam_dmux_uplink_vote, vote);
1446
1447 if (bam_dmux_uplink_vote == vote)
1448 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1449
1450 bam_dmux_uplink_vote = vote;
1451 if (vote)
1452 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1453 else
1454 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1455}
1456
Eric Holmberg454d9da2012-01-12 09:37:14 -07001457/*
1458 * @note: Must be called with ul_wakeup_lock locked.
1459 */
1460static inline void ul_powerdown(void)
1461{
1462 bam_dmux_log("%s: powerdown\n", __func__);
1463 verify_tx_queue_is_empty(__func__);
1464
1465 if (a2_pc_disabled) {
1466 wait_for_dfab = 1;
1467 INIT_COMPLETION(dfab_unvote_completion);
1468 release_wakelock();
1469 } else {
1470 wait_for_ack = 1;
1471 INIT_COMPLETION(ul_wakeup_ack_completion);
1472 power_vote(0);
1473 }
1474 bam_is_connected = 0;
1475 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1476}
1477
1478static inline void ul_powerdown_finish(void)
1479{
1480 if (a2_pc_disabled && wait_for_dfab) {
1481 unvote_dfab();
1482 complete_all(&dfab_unvote_completion);
1483 wait_for_dfab = 0;
1484 }
1485}
1486
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001487/*
1488 * Votes for UL power and returns current power state.
1489 *
1490 * @returns true if currently connected
1491 */
1492int msm_bam_dmux_ul_power_vote(void)
1493{
1494 int is_connected;
1495
1496 read_lock(&ul_wakeup_lock);
1497 atomic_inc(&ul_ondemand_vote);
1498 is_connected = bam_is_connected;
1499 if (!is_connected)
1500 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1501 read_unlock(&ul_wakeup_lock);
1502
1503 return is_connected;
1504}
1505
1506/*
1507 * Unvotes for UL power.
1508 *
1509 * @returns true if vote count is 0 (UL shutdown possible)
1510 */
1511int msm_bam_dmux_ul_power_unvote(void)
1512{
1513 int vote;
1514
1515 read_lock(&ul_wakeup_lock);
1516 vote = atomic_dec_return(&ul_ondemand_vote);
1517	if (unlikely(vote < 0))
1518 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1519 read_unlock(&ul_wakeup_lock);
1520
1521 return vote == 0;
1522}
1523
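/*
 * Usage sketch (illustrative only; wait_for_ul_connected() and
 * send_payload() are hypothetical client helpers): on-demand power
 * votes must be balanced around each uplink transaction.
 *
 *	if (!msm_bam_dmux_ul_power_vote())
 *		wait_for_ul_connected();  (blocks for BAM_DMUX_UL_CONNECTED)
 *	send_payload();
 *	msm_bam_dmux_ul_power_unvote();
 */
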
Jeff Hugocb798022012-04-09 14:55:40 -06001524int msm_bam_dmux_reg_notify(void *priv,
1525 void (*notify)(void *priv, int event_type,
1526 unsigned long data))
1527{
1528 struct outside_notify_func *func;
1529
1530 if (!notify)
1531 return -EINVAL;
1532
1533 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1534 if (!func)
1535 return -ENOMEM;
1536
1537 func->notify = notify;
1538 func->priv = priv;
1539 list_add(&func->list_node, &bam_other_notify_funcs);
1540
1541 return 0;
1542}
1543
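/*
 * Uplink inactivity timer, re-armed every UL_TIMEOUT_DELAY ms while
 * the UL is up. A non-empty bam_tx_pool counts as activity (and is
 * logged as a TX stall); the UL powers down only once nothing has
 * been written and no on-demand votes remain.
 */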
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001544static void ul_timeout(struct work_struct *work)
1545{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001546 unsigned long flags;
1547 int ret;
1548
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001549 if (in_global_reset)
1550 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001551 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1552 if (!ret) { /* failed to grab lock, reschedule and bail */
1553 schedule_delayed_work(&ul_timeout_work,
1554 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1555 return;
1556 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001557 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001558 if (!ul_packet_written) {
1559 spin_lock(&bam_tx_pool_spinlock);
1560 if (!list_empty(&bam_tx_pool)) {
1561 struct tx_pkt_info *info;
1562
1563 info = list_first_entry(&bam_tx_pool,
1564 struct tx_pkt_info, list_node);
1565 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1566 __func__, info->ts_sec, info->ts_nsec);
1567 DBG_INC_TX_STALL_CNT();
1568 ul_packet_written = 1;
1569 }
1570 spin_unlock(&bam_tx_pool_spinlock);
1571 }
1572
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001573 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1574 bam_dmux_log("%s: pkt written %d\n",
1575 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001576 ul_packet_written = 0;
1577 schedule_delayed_work(&ul_timeout_work,
1578 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001579 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001580 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001581 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001582 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001583 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001584 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001585}
Jeff Hugo4838f412012-01-20 11:19:37 -07001586
1587static int ssrestart_check(void)
1588{
Eric Holmberg90285e22012-02-22 12:33:05 -07001589 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1590 in_global_reset = 1;
1591 if (get_restart_level() <= RESET_SOC)
1592 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1593 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001594}
1595
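/*
 * Uplink wakeup handshake (A2 power collapse enabled): wait for the
 * ack of any prior power-down vote, assert SMSM_A2_POWER_CONTROL via
 * power_vote(1), then wait for the modem's wakeup ack and finally for
 * the BAM connection. Each wait is bounded by HZ and a timeout runs
 * ssrestart_check().
 */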
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001596static void ul_wakeup(void)
1597{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001598 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001599 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001600
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001601 mutex_lock(&wakeup_lock);
1602 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001603 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001604 mutex_unlock(&wakeup_lock);
1605 return;
1606 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001607
Jeff Hugoc2696142012-05-03 11:42:13 -06001608 /*
1609	 * If someone votes for UL before the bam is inited (modem up the
1610	 * first time), set a flag so init kicks off UL wakeup once inited.
1611 */
1612 mutex_lock(&delayed_ul_vote_lock);
1613 if (unlikely(!bam_mux_initialized)) {
1614 need_delayed_ul_vote = 1;
1615 mutex_unlock(&delayed_ul_vote_lock);
1616 mutex_unlock(&wakeup_lock);
1617 return;
1618 }
1619 mutex_unlock(&delayed_ul_vote_lock);
1620
Eric Holmberg006057d2012-01-11 10:10:42 -07001621 if (a2_pc_disabled) {
1622 /*
1623 * don't grab the wakelock the first time because it is
1624 * already grabbed when a2 powers on
1625 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001626 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001627 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001628 do_vote_dfab = 1; /* vote must occur after wait */
1629 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001630 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001631 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001632 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001633 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001634 &dfab_unvote_completion, HZ);
1635 BUG_ON(ret == 0);
1636 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001637 if (likely(do_vote_dfab))
1638 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001639 schedule_delayed_work(&ul_timeout_work,
1640 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1641 bam_is_connected = 1;
1642 mutex_unlock(&wakeup_lock);
1643 return;
1644 }
1645
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001646 /*
1647	 * Must wait for the previous power-down request to have been acked.
1648	 * Chances are the ack already came in, so this will just fall
1649	 * through instead of waiting.
1650 */
1651 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001652 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001653 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001654 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001655 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001656 if (unlikely(ret == 0) && ssrestart_check()) {
1657 mutex_unlock(&wakeup_lock);
1658 bam_dmux_log("%s timeout previous ack\n", __func__);
1659 return;
1660 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001661 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001662 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001663 power_vote(1);
1664 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001665 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001666 if (unlikely(ret == 0) && ssrestart_check()) {
1667 mutex_unlock(&wakeup_lock);
1668 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1669 return;
1670 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001671 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001672 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001673 if (unlikely(ret == 0) && ssrestart_check()) {
1674 mutex_unlock(&wakeup_lock);
1675 bam_dmux_log("%s timeout power on\n", __func__);
1676 return;
1677 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001678
1679 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001680 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001681 schedule_delayed_work(&ul_timeout_work,
1682 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1683 mutex_unlock(&wakeup_lock);
1684}
1685
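/*
 * Restore the BAM connection after the modem powers the A2 back on:
 * reset the device, reconnect both pipes and re-register their events
 * (skipped in power-management-only mode), ack the modem, and resume
 * RX processing.
 */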
1686static void reconnect_to_bam(void)
1687{
1688 int i;
1689
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001690 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001691 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001692 if (!power_management_only_mode) {
1693 i = sps_device_reset(a2_device_handle);
1694 if (i)
1695 pr_err("%s: device reset failed rc = %d\n", __func__,
1696 i);
1697 i = sps_connect(bam_tx_pipe, &tx_connection);
1698 if (i)
1699 pr_err("%s: tx connection failed rc = %d\n", __func__,
1700 i);
1701 i = sps_connect(bam_rx_pipe, &rx_connection);
1702 if (i)
1703 pr_err("%s: rx connection failed rc = %d\n", __func__,
1704 i);
1705 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1706 if (i)
1707 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1708 i);
1709 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1710 if (i)
1711 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1712 i);
1713 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001714
1715 bam_connection_is_active = 1;
1716
1717 if (polling_mode)
1718 rx_switch_to_interrupt_mode();
1719
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001720 toggle_apps_ack();
1721 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001722 if (!power_management_only_mode)
1723 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001724}
1725
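/*
 * Tear down the BAM connection when the modem votes A2 power off:
 * force a UL powerdown if the uplink was active, disconnect the pipes
 * and zero their descriptor FIFOs, and flush the RX pool so no stale
 * buffers survive the power cycle.
 */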
1726static void disconnect_to_bam(void)
1727{
1728 struct list_head *node;
1729 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001730 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001731
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001732 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001733
1734 /* handle disconnect during active UL */
1735 write_lock_irqsave(&ul_wakeup_lock, flags);
1736 if (bam_is_connected) {
1737 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1738 ul_powerdown();
1739 }
1740 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1741 ul_powerdown_finish();
1742
1743 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001744 INIT_COMPLETION(bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001745 if (!power_management_only_mode) {
1746 sps_disconnect(bam_tx_pipe);
1747 sps_disconnect(bam_rx_pipe);
1748 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1749 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1750 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001751 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001752
1753 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001754 while (!list_empty(&bam_rx_pool)) {
1755 node = bam_rx_pool.next;
1756 list_del(node);
1757 info = container_of(node, struct rx_pkt_info, list_node);
1758 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1759 DMA_FROM_DEVICE);
1760 dev_kfree_skb_any(info->skb);
1761 kfree(info);
1762 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001763 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001764 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001765
Jeff Hugo0b13a352012-03-17 23:18:30 -06001766 if (disconnect_ack)
1767 toggle_apps_ack();
1768
Eric Holmberg878923a2012-01-10 14:28:19 -07001769 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001770}
1771
1772static void vote_dfab(void)
1773{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001774 int rc;
1775
Eric Holmberg006057d2012-01-11 10:10:42 -07001776 bam_dmux_log("%s\n", __func__);
1777 mutex_lock(&dfab_status_lock);
1778 if (dfab_is_on) {
1779 bam_dmux_log("%s: dfab is already on\n", __func__);
1780 mutex_unlock(&dfab_status_lock);
1781 return;
1782 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001783 rc = clk_prepare_enable(dfab_clk);
Jeff Hugoca0caa82011-12-05 16:05:23 -07001784 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001785 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001786 rc = clk_prepare_enable(xo_clk);
1787 if (rc)
1788 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
Eric Holmberg006057d2012-01-11 10:10:42 -07001789 dfab_is_on = 1;
1790 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001791}
1792
1793static void unvote_dfab(void)
1794{
Eric Holmberg006057d2012-01-11 10:10:42 -07001795 bam_dmux_log("%s\n", __func__);
1796 mutex_lock(&dfab_status_lock);
1797 if (!dfab_is_on) {
1798 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1799 dump_stack();
1800 mutex_unlock(&dfab_status_lock);
1801 return;
1802 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001803 clk_disable_unprepare(dfab_clk);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001804 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001805 dfab_is_on = 0;
1806 mutex_unlock(&dfab_status_lock);
1807}
1808
1809/* reference counting wrapper around wakelock */
1810static void grab_wakelock(void)
1811{
1812 unsigned long flags;
1813
1814 spin_lock_irqsave(&wakelock_reference_lock, flags);
1815 bam_dmux_log("%s: ref count = %d\n", __func__,
1816 wakelock_reference_count);
1817 if (wakelock_reference_count == 0)
1818 wake_lock(&bam_wakelock);
1819 ++wakelock_reference_count;
1820 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1821}
1822
1823static void release_wakelock(void)
1824{
1825 unsigned long flags;
1826
1827 spin_lock_irqsave(&wakelock_reference_lock, flags);
1828 if (wakelock_reference_count == 0) {
1829 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1830 dump_stack();
1831 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1832 return;
1833 }
1834 bam_dmux_log("%s: ref count = %d\n", __func__,
1835 wakelock_reference_count);
1836 --wakelock_reference_count;
1837 if (wakelock_reference_count == 0)
1838 wake_unlock(&bam_wakelock);
1839 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001840}
1841
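/*
 * Subsystem-restart hook: after a modem shutdown, flag the global
 * reset, force the UL power vote off, reset per-channel state
 * (re-allocating platform devices for remotely opened channels), and
 * free every packet still queued in bam_tx_pool.
 */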
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001842static int restart_notifier_cb(struct notifier_block *this,
1843 unsigned long code,
1844 void *data)
1845{
1846 int i;
1847 struct list_head *node;
1848 struct tx_pkt_info *info;
1849 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001850 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001851
1852 if (code != SUBSYS_AFTER_SHUTDOWN)
1853 return NOTIFY_DONE;
1854
Eric Holmberg878923a2012-01-10 14:28:19 -07001855 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001856 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001857
1858 /* Handle uplink Powerdown */
1859 write_lock_irqsave(&ul_wakeup_lock, flags);
1860 if (bam_is_connected) {
1861 ul_powerdown();
1862 wait_for_ack = 0;
1863 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001864 /*
1865	 * If the modem crashed during ul_wakeup(), power_vote is still 1 and
1866	 * must be reset to 0; harmless if the bam_is_connected check passed.
1867 */
1868 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001869 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1870 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001871 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001872 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugo0b13a352012-03-17 23:18:30 -06001873 disconnect_ack = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001874
1875 /* Cleanup Channel States */
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001876 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1877 temp_remote_status = bam_ch_is_remote_open(i);
1878 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001879 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001880 if (bam_ch_is_local_open(i))
1881 bam_ch[i].status |= BAM_CH_IN_RESET;
1882 if (temp_remote_status) {
1883 platform_device_unregister(bam_ch[i].pdev);
1884 bam_ch[i].pdev = platform_device_alloc(
1885 bam_ch[i].name, 2);
1886 }
1887 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001888
1889 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001890 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001891 while (!list_empty(&bam_tx_pool)) {
1892 node = bam_tx_pool.next;
1893 list_del(node);
1894 info = container_of(node, struct tx_pkt_info,
1895 list_node);
1896 if (!info->is_cmd) {
1897 dma_unmap_single(NULL, info->dma_address,
1898 info->skb->len,
1899 DMA_TO_DEVICE);
1900 dev_kfree_skb_any(info->skb);
1901 } else {
1902 dma_unmap_single(NULL, info->dma_address,
1903 info->len,
1904 DMA_TO_DEVICE);
1905 kfree(info->skb);
1906 }
1907 kfree(info);
1908 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001909 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001910
Eric Holmberg878923a2012-01-10 14:28:19 -07001911 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001912 return NOTIFY_DONE;
1913}
1914
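/*
 * Full BAM bring-up: map the A2 BAM, register it with SPS, and build
 * both system-mode pipes (TX: memory -> A2 pipe 4, RX: A2 pipe 5 ->
 * memory) with 2k descriptor FIFOs, then kick off any UL vote that
 * arrived before init completed.
 */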
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001915static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001916{
1917 u32 h;
1918 dma_addr_t dma_addr;
1919 int ret;
1920 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001921 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001922
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001923 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001924 /* init BAM */
1925 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1926 if (!a2_virt_addr) {
1927 pr_err("%s: ioremap failed\n", __func__);
1928 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001929 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930 }
1931 a2_props.phys_addr = A2_PHYS_BASE;
1932 a2_props.virt_addr = a2_virt_addr;
1933 a2_props.virt_size = A2_PHYS_SIZE;
1934 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001935 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001936 a2_props.num_pipes = A2_NUM_PIPES;
1937 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001938 if (cpu_is_msm9615())
1939 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940 /* need to free on tear down */
1941 ret = sps_register_bam_device(&a2_props, &h);
1942 if (ret < 0) {
1943 pr_err("%s: register bam error %d\n", __func__, ret);
1944 goto register_bam_failed;
1945 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001946 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001947
1948 bam_tx_pipe = sps_alloc_endpoint();
1949 if (bam_tx_pipe == NULL) {
1950 pr_err("%s: tx alloc endpoint failed\n", __func__);
1951 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001952 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 }
1954 ret = sps_get_config(bam_tx_pipe, &tx_connection);
1955 if (ret) {
1956 pr_err("%s: tx get config failed %d\n", __func__, ret);
1957 goto tx_get_config_failed;
1958 }
1959
1960 tx_connection.source = SPS_DEV_HANDLE_MEM;
1961 tx_connection.src_pipe_index = 0;
1962 tx_connection.destination = h;
1963 tx_connection.dest_pipe_index = 4;
1964 tx_connection.mode = SPS_MODE_DEST;
1965 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
1966 tx_desc_mem_buf.size = 0x800; /* 2k */
1967 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
1968 &dma_addr, 0);
1969 if (tx_desc_mem_buf.base == NULL) {
1970 pr_err("%s: tx memory alloc failed\n", __func__);
1971 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001972 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001973 }
1974 tx_desc_mem_buf.phys_base = dma_addr;
1975 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
1976 tx_connection.desc = tx_desc_mem_buf;
1977 tx_connection.event_thresh = 0x10;
1978
1979 ret = sps_connect(bam_tx_pipe, &tx_connection);
1980 if (ret < 0) {
1981 pr_err("%s: tx connect error %d\n", __func__, ret);
1982 goto tx_connect_failed;
1983 }
1984
1985 bam_rx_pipe = sps_alloc_endpoint();
1986 if (bam_rx_pipe == NULL) {
1987 pr_err("%s: rx alloc endpoint failed\n", __func__);
1988 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001989 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001990 }
1991 ret = sps_get_config(bam_rx_pipe, &rx_connection);
1992 if (ret) {
1993 pr_err("%s: rx get config failed %d\n", __func__, ret);
1994 goto rx_get_config_failed;
1995 }
1996
1997 rx_connection.source = h;
1998 rx_connection.src_pipe_index = 5;
1999 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2000 rx_connection.dest_pipe_index = 1;
2001 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002002 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2003 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002004 rx_desc_mem_buf.size = 0x800; /* 2k */
2005 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2006 &dma_addr, 0);
2007 if (rx_desc_mem_buf.base == NULL) {
2008 pr_err("%s: rx memory alloc failed\n", __func__);
2009 ret = -ENOMEM;
2010 goto rx_mem_failed;
2011 }
2012 rx_desc_mem_buf.phys_base = dma_addr;
2013 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2014 rx_connection.desc = rx_desc_mem_buf;
2015 rx_connection.event_thresh = 0x10;
2016
2017 ret = sps_connect(bam_rx_pipe, &rx_connection);
2018 if (ret < 0) {
2019 pr_err("%s: rx connect error %d\n", __func__, ret);
2020 goto rx_connect_failed;
2021 }
2022
2023 tx_register_event.options = SPS_O_EOT;
2024 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2025 tx_register_event.xfer_done = NULL;
2026 tx_register_event.callback = bam_mux_tx_notify;
2027 tx_register_event.user = NULL;
2028 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
2029 if (ret < 0) {
2030 pr_err("%s: tx register event error %d\n", __func__, ret);
2031 goto rx_event_reg_failed;
2032 }
2033
Jeff Hugo33dbc002011-08-25 15:52:53 -06002034 rx_register_event.options = SPS_O_EOT;
2035 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2036 rx_register_event.xfer_done = NULL;
2037 rx_register_event.callback = bam_mux_rx_notify;
2038 rx_register_event.user = NULL;
2039 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2040 if (ret < 0) {
2041		pr_err("%s: rx register event error %d\n", __func__, ret);
2042 goto rx_event_reg_failed;
2043 }
2044
Jeff Hugoc2696142012-05-03 11:42:13 -06002045 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002047 if (need_delayed_ul_vote) {
2048 need_delayed_ul_vote = 0;
2049 msm_bam_dmux_kickoff_ul_wakeup();
2050 }
2051 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002052 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002053 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002054 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002055 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002056 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057
2058rx_event_reg_failed:
2059 sps_disconnect(bam_rx_pipe);
2060rx_connect_failed:
2061 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2062 rx_desc_mem_buf.phys_base);
2063rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064rx_get_config_failed:
2065 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002066rx_alloc_endpoint_failed:
2067 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002068tx_connect_failed:
2069 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2070 tx_desc_mem_buf.phys_base);
2071tx_get_config_failed:
2072 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002073tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002074 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002075 /*
2076 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2077 * same handle below will cause a crash, so skip it if we've freed
2078 * the handle here.
2079 */
2080 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002082 if (!skip_iounmap)
2083 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002084ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002086 return ret;
2087}
2088
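/*
 * Minimal bring-up used when the full init fails: register the BAM so
 * the power-control handshake still works, but skip the data pipes
 * and run in power-management-only mode.
 */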
2089static int bam_init_fallback(void)
2090{
2091 u32 h;
2092 int ret;
2093 void *a2_virt_addr;
2094
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002095 /* init BAM */
2096 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
2097 if (!a2_virt_addr) {
2098 pr_err("%s: ioremap failed\n", __func__);
2099 ret = -ENOMEM;
2100 goto ioremap_failed;
2101 }
2102 a2_props.phys_addr = A2_PHYS_BASE;
2103 a2_props.virt_addr = a2_virt_addr;
2104 a2_props.virt_size = A2_PHYS_SIZE;
2105 a2_props.irq = A2_BAM_IRQ;
2106 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2107 a2_props.num_pipes = A2_NUM_PIPES;
2108 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2109 if (cpu_is_msm9615())
2110 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2111 ret = sps_register_bam_device(&a2_props, &h);
2112 if (ret < 0) {
2113 pr_err("%s: register bam error %d\n", __func__, ret);
2114 goto register_bam_failed;
2115 }
2116 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002117
2118 mutex_lock(&delayed_ul_vote_lock);
2119 bam_mux_initialized = 1;
2120 if (need_delayed_ul_vote) {
2121 need_delayed_ul_vote = 0;
2122 msm_bam_dmux_kickoff_ul_wakeup();
2123 }
2124 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002125 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002126
Jeff Hugo18792a32012-06-20 15:25:55 -06002127 power_management_only_mode = 1;
2128 bam_connection_is_active = 1;
2129 complete_all(&bam_connection_completion);
2130
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002131 return 0;
2132
2133register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002134 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002135ioremap_failed:
2136 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002137}
Jeff Hugoade1f842011-08-03 15:53:59 -06002138
Jeff Hugoa670b762012-03-15 15:58:28 -06002139static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002140{
2141 int ret = 0;
2142
2143 ret = bam_init();
2144 if (ret) {
2145 ret = bam_init_fallback();
2146 if (ret)
2147			pr_err("%s: bam init fallback failed: %d\n",
2148 __func__, ret);
2149 }
2150}
2151
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002152static void toggle_apps_ack(void)
2153{
2154 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002155
2156 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2157 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002158 smsm_change_state(SMSM_APPS_STATE,
2159 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2160 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2161 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002162 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002163}
2164
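/*
 * SMSM power-control state machine: a rising SMSM_A2_POWER_CONTROL
 * edge means reconnect (or first-time BAM init), a falling edge means
 * disconnect; duplicate notifications of the same state are dropped
 * under smsm_cb_lock.
 */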
Jeff Hugoade1f842011-08-03 15:53:59 -06002165static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2166{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002167 static int last_processed_state;
2168
2169 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002170 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002171 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002172 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2173 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002174 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
2175 bam_dmux_log("%s: already processed this state\n", __func__);
2176 mutex_unlock(&smsm_cb_lock);
2177 return;
2178 }
2179
2180 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002181
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002182 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002183 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002184 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002185 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002186 } else if (bam_mux_initialized &&
2187 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002188 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002189 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002190 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002191 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002192 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002193 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002194 if (cpu_is_msm9615())
2195 msm9615_bam_init();
2196 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002197 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002198 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002199 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002200 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002201 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002202 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002203
2204}
2205
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002206static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2207 uint32_t new_state)
2208{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002209 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002210 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2211 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002212 complete_all(&ul_wakeup_ack_completion);
2213}
2214
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002215static int bam_dmux_probe(struct platform_device *pdev)
2216{
2217 int rc;
2218
2219 DBG("%s probe called\n", __func__);
2220 if (bam_mux_initialized)
2221 return 0;
2222
Stephen Boyd69d35e32012-02-14 15:33:30 -08002223 xo_clk = clk_get(&pdev->dev, "xo");
2224 if (IS_ERR(xo_clk)) {
2225 pr_err("%s: did not get xo clock\n", __func__);
2226 return PTR_ERR(xo_clk);
2227 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002228 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002229 if (IS_ERR(dfab_clk)) {
2230 pr_err("%s: did not get dfab clock\n", __func__);
2231		return PTR_ERR(dfab_clk);
2232 }
2233
2234 rc = clk_set_rate(dfab_clk, 64000000);
2235 if (rc)
2236 pr_err("%s: unable to set dfab clock rate\n", __func__);
2237
Jeff Hugofff43af92012-03-29 17:54:52 -06002238 /*
2239	 * Set up the RX workqueue as a single CPU-intensive queue so it can
2240	 * be pinned to core 0 without blocking the watchdog pet function,
2241	 * and so netif_rx() in rmnet only ever uses one queue.
2242 */
2243 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2244 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002245 if (!bam_mux_rx_workqueue)
2246 return -ENOMEM;
2247
2248 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2249 if (!bam_mux_tx_workqueue) {
2250 destroy_workqueue(bam_mux_rx_workqueue);
2251 return -ENOMEM;
2252 }
2253
Jeff Hugo7960abd2011-08-02 15:39:38 -06002254 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002255 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002256 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2257 "bam_dmux_ch_%d", rc);
2258		/* bus 2, i.e. a2 stream 2 */
2259 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2260 if (!bam_ch[rc].pdev) {
2261 pr_err("%s: platform device alloc failed\n", __func__);
2262 destroy_workqueue(bam_mux_rx_workqueue);
2263 destroy_workqueue(bam_mux_tx_workqueue);
2264 return -ENOMEM;
2265 }
2266 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002267
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002268 init_completion(&ul_wakeup_ack_completion);
2269 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002270 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002271 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002272 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002273
Jeff Hugoade1f842011-08-03 15:53:59 -06002274 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2275 bam_dmux_smsm_cb, NULL);
2276
2277 if (rc) {
2278 destroy_workqueue(bam_mux_rx_workqueue);
2279 destroy_workqueue(bam_mux_tx_workqueue);
2280 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2281 return -ENOMEM;
2282 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002283
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002284 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2285 bam_dmux_smsm_ack_cb, NULL);
2286
2287 if (rc) {
2288 destroy_workqueue(bam_mux_rx_workqueue);
2289 destroy_workqueue(bam_mux_tx_workqueue);
2290 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2291 SMSM_A2_POWER_CONTROL,
2292 bam_dmux_smsm_cb, NULL);
2293 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2294 rc);
2295 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2296 platform_device_put(bam_ch[rc].pdev);
2297 return -ENOMEM;
2298 }
2299
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002300 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2301 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2302
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303 return 0;
2304}
2305
2306static struct platform_driver bam_dmux_driver = {
2307 .probe = bam_dmux_probe,
2308 .driver = {
2309 .name = "BAM_RMNT",
2310 .owner = THIS_MODULE,
2311 },
2312};
2313
2314static int __init bam_dmux_init(void)
2315{
Eric Holmberg878923a2012-01-10 14:28:19 -07002316 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002317#ifdef CONFIG_DEBUG_FS
2318 struct dentry *dent;
2319
2320 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002321 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002323 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2324 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002325 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002326 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002327#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002328 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2329 if (ret) {
2330 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2331 bam_dmux_state_logging_disabled = 1;
2332 }
2333
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002334 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335 return platform_driver_register(&bam_dmux_driver);
2336}
2337
Jeff Hugoade1f842011-08-03 15:53:59 -06002338late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002339MODULE_DESCRIPTION("MSM BAM DMUX");
2340MODULE_LICENSE("GPL v2");