/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA		0
#define BAM_MUX_HDR_CMD_OPEN		1
#define BAM_MUX_HDR_CMD_CLOSE		2
#define BAM_MUX_HDR_CMD_STATUS		3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC	4

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;

#define DBG(x...) do {				\
		if (msm_bam_dmux_debug_enable)	\
			pr_debug(x);		\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {					\
		bam_dmux_write_cnt += (x);				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total written bytes %u\n",	\
				 __func__, bam_dmux_write_cnt);		\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					    \
		bam_dmux_write_cpy_bytes += (x);			    \
		bam_dmux_write_cpy_cnt++;				    \
		if (msm_bam_dmux_debug_enable)				    \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	    \
				 bam_dmux_write_cpy_bytes);		    \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);

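/*
 * On-the-wire mux header prepended to every frame exchanged with the A2.
 * magic_num guards against corrupt or misaligned frames, cmd selects one
 * of the BAM_MUX_HDR_CMD_* opcodes, ch_id is the logical channel, and
 * pkt_len/pad_len describe the payload and its 4-byte-alignment padding.
 */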
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static struct clk *dfab_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static struct delayed_work msm9615_bam_init_work;
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * Complete size of message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 */
	len += scnprintf(buff, sizeof(buff),
			"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c ",
			(unsigned)t_now, nanosec_rem,
			a2_pc_disabled ? 'D' : 'd',
			in_global_reset ? 'R' : 'r',
			bam_dmux_power_state ? 'P' : 'p',
			bam_connection_is_active ? 'A' : 'a',
			bam_dmux_uplink_vote ? 'V' : 'v',
			bam_is_connected ? 'U' : 'u',
			wait_for_ack ? 'W' : 'w',
			ul_wakeup_ack_completion.done ? 'A' : 'a'
			);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

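/*
 * Replenish the RX pool with one BUFFER_SIZE skb: DMA-map it, track it in
 * bam_rx_pool, and hand the buffer to the SPS RX pipe so the A2 has a
 * descriptor to fill.
 */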
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	if (in_global_reset)
		return;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info) {
		pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
		return;
	}

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (info->skb == NULL) {
		pr_err("%s: unable to alloc skb\n", __func__);
		kfree(info);
		return;
	}
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_mutexlock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_mutexlock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
						DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
				BUFFER_SIZE, info,
				SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}

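/*
 * Strip the mux header from a received data frame and deliver the skb to
 * the channel owner's notify() callback (or drop it if no client is
 * registered), then queue a replacement RX buffer.
 */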
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

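/*
 * Handle an OPEN command from the A2: mark the channel remote-open, reset
 * its in-flight TX count, and register the channel's platform device so
 * clients can probe against it.
 */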
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	queue_rx();
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
}

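/*
 * Work function run for every completed RX buffer: unmap the DMA buffer,
 * validate the mux header, and dispatch on the command type (DATA, OPEN,
 * OPEN_NO_A2_PC, CLOSE). Invalid frames are logged and dropped.
 */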
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		dev_kfree_skb_any(rx_skb);
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

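/*
 * Queue a mux command (e.g. an OPEN or CLOSE header) on the TX pipe. The
 * buffer is DMA-mapped, tracked in bam_tx_pool for completion matching,
 * and submitted via sps_transfer_one(); on failure the packet is unlinked
 * and freed.
 */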
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

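/*
 * TX completion work. Completions must arrive in submission order, so the
 * finished packet is checked against the head of bam_tx_pool; a mismatch
 * indicates pool corruption and triggers BUG(). Data packets are handed
 * back to the client via BAM_DMUX_WRITE_DONE, command packets are freed.
 */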
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
			&errant_pkt->list_node, errant_pkt->ts_sec,
			errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->data_len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

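/*
 * Queue an skb for transmission on a logical channel. The caller's skb is
 * copied into an expanded skb if it lacks tailroom for 4-byte alignment
 * padding, a mux header is pushed, and the frame is submitted to the TX
 * pipe. Wakes the uplink first if the A2 has power-collapsed. Returns
 * -EAGAIN when the channel uses watermarks and is above HIGH_WATERMARK.
 */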
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have enough tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should have allocated space for the hdr and padding;
	   the hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

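/*
 * Open a logical channel and send an OPEN command to the A2. The notify()
 * callback delivers BAM_DMUX_RECEIVE, BAM_DMUX_WRITE_DONE and the UL
 * connect/disconnect events, with priv passed back as the first argument.
 *
 * Illustrative client usage only (hypothetical callback and helper names,
 * error handling elided):
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_RECEIVE)
 *			my_process_skb((struct sk_buff *)data);
 *	}
 *	...
 *	rc = msm_bam_dmux_open(ch_id, my_priv, my_notify);
 */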
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

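/*
 * Close a logical channel: clear the local-open state and send a CLOSE
 * command to the A2. If the channel is in subsystem-restart reset, only
 * the local bookkeeping is cleared since the remote side is already gone.
 */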
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

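/*
 * Flow-control queries. Calling either helper switches the channel into
 * watermark mode (use_wm), after which msm_bam_dmux_write() rejects frames
 * with -EAGAIN once HIGH_WATERMARK packets are in flight; clients should
 * resume writing when the count drains to LOW_WATERMARK.
 */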
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}

	return ret;
}

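/*
 * Leave RX polling mode: re-enable pipe interrupts, release the wakelock
 * taken when polling began, and drain any descriptors that completed
 * while interrupts were still off. On failure, stay in polling mode and
 * requeue the poll worker.
 */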
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		list_del(&info->list_node);
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}

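/*
 * RX polling loop. While descriptors keep completing, drain them; after
 * POLLING_INACTIVITY consecutive empty passes, fall back to interrupt
 * mode. Sleeps ~1 ms between passes to bound CPU usage.
 */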
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			list_del(&info->list_node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

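/*
 * SPS TX event callback: on EOT, unmap the finished buffer and defer the
 * rest of the completion handling to bam_mux_write_done() on the TX
 * workqueue.
 */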
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

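/*
 * SPS RX event callback: on the first EOT, disable pipe interrupts, grab
 * a wakelock, and switch to polling mode, letting rx_timer_work_func()
 * drain the pipe by polling rather than taking an interrupt per packet.
 */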
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			queue_work(bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			"ch%02d  local open=%s  remote open=%s\n",
			j, bam_ch_is_local_open(j) ? "Y" : "N",
			bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
	struct list_head *p;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	__list_for_each(p, &bam_tx_pool) {
		++n;
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
			"skb copy cnt:    %u\n"
			"skb copy bytes:  %u\n"
			"sps tx failures: %u\n"
			"sps tx stalls:   %u\n",
			bam_dmux_write_cpy_cnt,
			bam_dmux_write_cpy_bytes,
			bam_dmux_tx_sps_failure_cnt,
			bam_dmux_tx_stall_cnt
			);

	return i;
}

static int debug_log(char *buff, int max, loff_t *ppos)
{
	unsigned long flags;
	int i = 0;

	if (bam_dmux_state_logging_disabled) {
		i += scnprintf(buff + i, max - i, "Logging disabled\n");
		return i;
	}

	if (*ppos == 0) {
		i += scnprintf(buff + i, max - i,
			"<DMUX> timestamp FLAGS [Message]\n"
			"FLAGS:\n"
			"\tD: 1 = Power collapse disabled\n"
			"\tR: 1 = in global reset\n"
			"\tP: 1 = BAM is powered up\n"
			"\tA: 1 = BAM initialized and ready for data\n"
			"\n"
			"\tV: 1 = Uplink vote for power\n"
			"\tU: 1 = Uplink active\n"
			"\tW: 1 = Uplink Wait-for-ack\n"
			"\tA: 1 = Uplink ACK received\n"
			);
		buff += i;
	}

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	while (kfifo_len(&bam_dmux_state_log)
			&& (i + LOG_MESSAGE_MAX_SIZE) < max) {
		int k_len;
		k_len = kfifo_out(&bam_dmux_state_log,
				buff, LOG_MESSAGE_MAX_SIZE);
		if (k_len != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: retrieve failure %d\n", __func__, k_len);
			break;
		}

		/* keep non-null portion of string and add line break */
		k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
		buff += k_len;
		i += k_len;
		if (k_len && *(buff - 1) != '\n') {
			*buff++ = '\n';
			++i;
		}
	}
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static ssize_t debug_read_multiple(struct file *file, char __user *buff,
				   size_t count, loff_t *ppos)
{
	int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
	char *buffer;
	int bsize;

	buffer = kmalloc(count, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	bsize = util_func(buffer, count, ppos);

	if (bsize >= 0) {
		if (copy_to_user(buff, buffer, bsize)) {
			kfree(buffer);
			return -EFAULT;
		}
		*ppos += bsize;
	}
	kfree(buffer);
	return bsize;
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static const struct file_operations debug_ops_multiple = {
	.read = debug_read_multiple,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}

static void debug_create_multiple(const char *name, mode_t mode,
				  struct dentry *dent,
				  int (*fill)(char *buf, int max, loff_t *ppos))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}
#endif

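/*
 * Broadcast a DMUX event (e.g. BAM_DMUX_UL_CONNECTED) to every channel
 * that is fully open.
 */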
static void notify_all(int event, unsigned long data)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		if (bam_ch_is_open(i)) {
			bam_ch[i].notify(bam_ch[i].priv, event, data);
			bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
					__func__, i, event, data);
		}
	}
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return;
		read_lock(&ul_wakeup_lock);
		ul_packet_written = 1;
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}
	read_unlock(&ul_wakeup_lock);
}

void msm_bam_dmux_kickoff_ul_wakeup(void)
{
	queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
}

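/*
 * Record the local uplink power vote and signal it to the A2 by setting
 * or clearing SMSM_A2_POWER_CONTROL in the shared SMSM state.
 */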
static void power_vote(int vote)
{
	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
			bam_dmux_uplink_vote, vote);

	if (bam_dmux_uplink_vote == vote)
		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);

	bam_dmux_uplink_vote = vote;
	if (vote)
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	else
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
}

/*
 * @note: Must be called with ul_wakeup_lock locked.
 */
static inline void ul_powerdown(void)
{
	bam_dmux_log("%s: powerdown\n", __func__);
	verify_tx_queue_is_empty(__func__);

	if (a2_pc_disabled) {
		wait_for_dfab = 1;
		INIT_COMPLETION(dfab_unvote_completion);
		release_wakelock();
	} else {
		wait_for_ack = 1;
		INIT_COMPLETION(ul_wakeup_ack_completion);
		power_vote(0);
	}
	bam_is_connected = 0;
	notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
}

static inline void ul_powerdown_finish(void)
{
	if (a2_pc_disabled && wait_for_dfab) {
		unvote_dfab();
		complete_all(&dfab_unvote_completion);
		wait_for_dfab = 0;
	}
}

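/*
 * Uplink inactivity timer. If traffic was written in the last interval,
 * re-arm the timer; a non-empty TX pool with no new writes is counted as
 * a TX stall and also keeps power up. Only when the link is truly idle
 * does the uplink power down.
 */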
static void ul_timeout(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	if (in_global_reset)
		return;
	ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
	if (!ret) { /* failed to grab lock, reschedule and bail */
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		return;
	}
	if (bam_is_connected) {
		if (!ul_packet_written) {
			spin_lock(&bam_tx_pool_spinlock);
			if (!list_empty(&bam_tx_pool)) {
				struct tx_pkt_info *info;

				info = list_first_entry(&bam_tx_pool,
						struct tx_pkt_info, list_node);
				DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
					__func__, info->ts_sec, info->ts_nsec);
				DBG_INC_TX_STALL_CNT();
				ul_packet_written = 1;
			}
			spin_unlock(&bam_tx_pool_spinlock);
		}

		if (ul_packet_written) {
			bam_dmux_log("%s: packet written\n", __func__);
			ul_packet_written = 0;
			schedule_delayed_work(&ul_timeout_work,
					msecs_to_jiffies(UL_TIMEOUT_DELAY));
		} else {
			ul_powerdown();
		}
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
}

static int ssrestart_check(void)
{
	/*
	 * If the restart level is RESET_SOC, SSR is not enabled, so the
	 * crashed modem will bring down the whole system anyway; use BUG()
	 * to report the error. Otherwise, prepare for the restart event,
	 * which should arrive soon.
	 */
	DMUX_LOG_KERR("%s: modem timeout\n", __func__);
	if (get_restart_level() <= RESET_SOC) {
		BUG();
		return 0;
	} else {
		in_global_reset = 1;
		return 1;
	}
}

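/*
 * Bring the uplink out of power collapse. In the normal (power-collapse
 * enabled) path this votes for A2 power over SMSM and then waits, with
 * HZ timeouts, for the previous power-down ack, the wakeup ack, and the
 * BAM connection to complete; each timeout is treated as a modem hang
 * and routed through ssrestart_check().
 */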
static void ul_wakeup(void)
{
	int ret;
	static int called_before;

	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		bam_dmux_log("%s Already awake\n", __func__);
		mutex_unlock(&wakeup_lock);
		return;
	}

	if (a2_pc_disabled) {
		/*
		 * don't grab the wakelock the first time because it is
		 * already grabbed when a2 powers on
		 */
		if (likely(called_before))
			grab_wakelock();
		else
			called_before = 1;
		if (wait_for_dfab) {
			ret = wait_for_completion_timeout(
					&dfab_unvote_completion, HZ);
			BUG_ON(ret == 0);
		}
		vote_dfab();
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		bam_is_connected = 1;
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * Must wait for the previous power-down request to have been acked;
	 * chances are it already came in, and this will just fall through
	 * instead of waiting.
	 */
	if (wait_for_ack) {
		bam_dmux_log("%s waiting for previous ack\n", __func__);
		ret = wait_for_completion_timeout(
					&ul_wakeup_ack_completion, HZ);
		wait_for_ack = 0;
		if (unlikely(ret == 0) && ssrestart_check()) {
			mutex_unlock(&wakeup_lock);
			bam_dmux_log("%s timeout previous ack\n", __func__);
			return;
		}
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	power_vote(1);
	bam_dmux_log("%s waiting for wakeup ack\n", __func__);
	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout wakeup ack\n", __func__);
		return;
	}
	bam_dmux_log("%s waiting completion\n", __func__);
	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout power on\n", __func__);
		return;
	}

	bam_is_connected = 1;
	bam_dmux_log("%s complete\n", __func__);
	schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}

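/*
 * Restore the BAM connection after the A2 powers back up: reset the BAM
 * device, reconnect both pipes, re-register for pipe events, refill the
 * RX buffer pool, and ack the power-up request.
 */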
static void reconnect_to_bam(void)
{
	int i;

	in_global_reset = 0;
	vote_dfab();
	i = sps_device_reset(a2_device_handle);
	if (i)
		pr_err("%s: device reset failed rc = %d\n", __func__, i);
	i = sps_connect(bam_tx_pipe, &tx_connection);
	if (i)
		pr_err("%s: tx connection failed rc = %d\n", __func__, i);
	i = sps_connect(bam_rx_pipe, &rx_connection);
	if (i)
		pr_err("%s: rx connection failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (i)
		pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (i)
		pr_err("%s: rx event reg failed rc = %d\n", __func__, i);

	bam_connection_is_active = 1;

	if (polling_mode)
		rx_switch_to_interrupt_mode();

	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();

	toggle_apps_ack();
	complete_all(&bam_connection_completion);
}

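/*
 * Tear down the BAM connection when the A2 powers down: force any active
 * uplink into powerdown, disconnect both pipes, drop the dfab vote, and
 * free all queued RX buffers.  The TX pool is verified empty at the end.
 */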
static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;
	unsigned long flags;

	bam_connection_is_active = 0;

	/* handle disconnect during active UL */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
		ul_powerdown();
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();

	/* tear down BAM connection */
	INIT_COMPLETION(bam_connection_completion);
	sps_disconnect(bam_tx_pipe);
	sps_disconnect(bam_rx_pipe);
	unvote_dfab();
	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);

	mutex_lock(&bam_rx_pool_mutexlock);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
	mutex_unlock(&bam_rx_pool_mutexlock);

	verify_tx_queue_is_empty(__func__);
}

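/* vote to keep the dfab clock on; safe to call when already voted */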
static void vote_dfab(void)
{
	int rc;

	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (dfab_is_on) {
		bam_dmux_log("%s: dfab is already on\n", __func__);
		mutex_unlock(&dfab_status_lock);
		return;
	}
	rc = clk_prepare_enable(dfab_clk);
	if (rc)
		DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
	dfab_is_on = 1;
	mutex_unlock(&dfab_status_lock);
}

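/* drop the dfab clock vote; logs and bails if the vote is not held */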
static void unvote_dfab(void)
{
	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (!dfab_is_on) {
		DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
		dump_stack();
		mutex_unlock(&dfab_status_lock);
		return;
	}
	clk_disable_unprepare(dfab_clk);
	dfab_is_on = 0;
	mutex_unlock(&dfab_status_lock);
}

/* reference counting wrapper around wakelock */
static void grab_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	bam_dmux_log("%s: ref count = %d\n", __func__,
						wakelock_reference_count);
	if (wakelock_reference_count == 0)
		wake_lock(&bam_wakelock);
	++wakelock_reference_count;
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

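/* drop one wakelock reference; unlocks when the count reaches zero */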
static void release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	if (wakelock_reference_count == 0) {
		DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
		dump_stack();
		spin_unlock_irqrestore(&wakelock_reference_lock, flags);
		return;
	}
	bam_dmux_log("%s: ref count = %d\n", __func__,
						wakelock_reference_count);
	--wakelock_reference_count;
	if (wakelock_reference_count == 0)
		wake_unlock(&bam_wakelock);
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

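/*
 * Modem subsystem restart notifier.  After modem shutdown, force the
 * uplink down, reset per-channel state so clients see the remote side
 * as closed, and free every packet still queued in the TX pool.
 */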
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	int i;
	struct list_head *node;
	struct tx_pkt_info *info;
	int temp_remote_status;
	unsigned long flags;

	if (code != SUBSYS_AFTER_SHUTDOWN)
		return NOTIFY_DONE;

	bam_dmux_log("%s: begin\n", __func__);
	in_global_reset = 1;

	/* Handle uplink Powerdown */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		ul_powerdown();
		wait_for_ack = 0;
	}
	/*
	 * If the modem crashed during ul_wakeup(), power_vote is 1 and needs
	 * to be reset to 0; this is harmless if the bam_is_connected check
	 * above passed.
	 */
	power_vote(0);
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
	a2_pc_disabled = 0;

	/* Cleanup Channel States */
	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		temp_remote_status = bam_ch_is_remote_open(i);
		bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
		bam_ch[i].num_tx_pkts = 0;
		if (bam_ch_is_local_open(i))
			bam_ch[i].status |= BAM_CH_IN_RESET;
		if (temp_remote_status) {
			platform_device_unregister(bam_ch[i].pdev);
			bam_ch[i].pdev = platform_device_alloc(
						bam_ch[i].name, 2);
		}
	}

	/* Cleanup pending UL data */
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	while (!list_empty(&bam_tx_pool)) {
		node = bam_tx_pool.next;
		list_del(node);
		info = container_of(node, struct tx_pkt_info,
							list_node);
		if (!info->is_cmd) {
			dma_unmap_single(NULL, info->dma_address,
						info->skb->len,
						DMA_TO_DEVICE);
			dev_kfree_skb_any(info->skb);
		} else {
			dma_unmap_single(NULL, info->dma_address,
						info->len,
						DMA_TO_DEVICE);
			kfree(info->skb);
		}
		kfree(info);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	bam_dmux_log("%s: complete\n", __func__);
	return NOTIFY_DONE;
}

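/*
 * Full BAM bring-up: map the A2 BAM, register it with SPS, allocate and
 * connect the TX and RX pipes with their descriptor FIFOs, register EOT
 * callbacks, and prime the RX buffer pool.
 */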
static int bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;
	int skip_iounmap = 0;

	vote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_connect_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
	return 0;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
	sps_disconnect(bam_tx_pipe);
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_mem_failed:
	sps_deregister_bam_device(h);
	/*
	 * sps_deregister_bam_device() calls iounmap.  Calling iounmap on the
	 * same handle below will cause a crash, so skip it if we've freed
	 * the handle here.
	 */
	skip_iounmap = 1;
register_bam_failed:
	if (!skip_iounmap)
		iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	return ret;
}

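/*
 * Minimal bring-up used when bam_init() fails: register the A2 BAM device
 * without connecting any pipes, so reconnect_to_bam() still has a valid
 * device handle after a later modem restart.
 */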
static int bam_init_fallback(void)
{
	u32 h;
	int ret;
	void *a2_virt_addr;

	unvote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	return 0;

register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	return ret;
}

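/* delayed-work wrapper for 9615: run bam_init(), falling back if it fails */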
static void msm9615_bam_init(struct work_struct *work)
{
	int ret = 0;

	ret = bam_init();
	if (ret) {
		ret = bam_init_fallback();
		if (ret)
			pr_err("%s: bam init fallback failed: %d\n",
					__func__, ret);
	}
}

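/*
 * Ack one A2 power control request by toggling the
 * SMSM_A2_POWER_CONTROL_ACK bit: set it if currently clear, clear it if
 * currently set.
 */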
static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */

	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
			clear_bit & 0x1, ~clear_bit & 0x1);
	smsm_change_state(SMSM_APPS_STATE,
			clear_bit & SMSM_A2_POWER_CONTROL_ACK,
			~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	clear_bit = ~clear_bit;
}

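/*
 * SMSM callback for SMSM_A2_POWER_CONTROL.  A rising edge means the A2
 * has powered the BAM up (reconnect, or first-time init); a falling edge
 * means it is powering the BAM down (disconnect).
 */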
static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);

	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: reconnect\n", __func__);
		grab_wakelock();
		reconnect_to_bam();
	} else if (bam_mux_initialized &&
					!(new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: disconnect\n", __func__);
		disconnect_to_bam();
		release_wakelock();
	} else if (new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: init\n", __func__);
		grab_wakelock();
		if (cpu_is_msm9615()) {
			/*
			 * Even though the A2 has signaled it is ready via the
			 * SMSM_A2_POWER_CONTROL bit, it has not yet enabled
			 * the pipes as needed by sps_connect in satellite
			 * mode.  Add a short delay to give the modem time to
			 * enable the pipes.
			 */
			schedule_delayed_work(&msm9615_bam_init_work,
						msecs_to_jiffies(100));
		} else {
			bam_init();
		}
	} else {
		bam_dmux_log("%s: bad state change\n", __func__);
		pr_err("%s: unsupported state change\n", __func__);
	}
}

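/* SMSM callback for the wakeup ack bit; releases waiters in ul_wakeup() */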
static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
						uint32_t new_state)
{
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	complete_all(&ul_wakeup_ack_completion);
}

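/*
 * Platform probe: claim the dfab clock, create the TX/RX workqueues,
 * pre-allocate per-channel platform devices, and register for the SMSM
 * power control and ack bits.  BAM init itself is deferred until the A2
 * asserts SMSM_A2_POWER_CONTROL.
 */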
static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	dfab_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(dfab_clk)) {
		pr_err("%s: did not get dfab clock\n", __func__);
		return -EFAULT;
	}

	rc = clk_set_rate(dfab_clk, 64000000);
	if (rc)
		pr_err("%s: unable to set dfab clock rate\n", __func__);

	bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
					"bam_dmux_ch_%d", rc);
		/* bus 2, i.e. a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	init_completion(&ul_wakeup_ack_completion);
	init_completion(&bam_connection_completion);
	init_completion(&dfab_unvote_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	INIT_DELAYED_WORK(&msm9615_bam_init_work, msm9615_bam_init);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
					bam_dmux_smsm_ack_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		smsm_state_cb_deregister(SMSM_MODEM_STATE,
					SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);
		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
				rc);
		for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
			platform_device_put(bam_ch[rc].pdev);
		return -ENOMEM;
	}

	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
	int ret;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent)) {
		debug_create("tbl", 0444, dent, debug_tbl);
		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
		debug_create("stats", 0444, dent, debug_stats);
		debug_create_multiple("log", 0444, dent, debug_log);
	}
#endif
	ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to allocate log %d\n", __func__, ret);
		bam_dmux_state_logging_disabled = 1;
	}

	subsys_notif_register_notifier("modem", &restart_notifier);
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");