/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <mach/msm_ipc_logging.h>
#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#include "bam_dmux_private.h"

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 2950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 3050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 1;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

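/*
 * Indirection table for the SMSM and SPS entry points used by this driver.
 * Calls are routed through bam_ops rather than made directly, presumably so
 * the entry points can be substituted (for example by test code);
 * bam_default_ops simply points at the real implementations.
 */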
static struct bam_ops_if bam_default_ops = {
	/* smsm */
	.smsm_change_state_ptr = &smsm_change_state,
	.smsm_get_state_ptr = &smsm_get_state,
	.smsm_state_cb_register_ptr = &smsm_state_cb_register,
	.smsm_state_cb_deregister_ptr = &smsm_state_cb_deregister,

	/* sps */
	.sps_connect_ptr = &sps_connect,
	.sps_disconnect_ptr = &sps_disconnect,
	.sps_register_bam_device_ptr = &sps_register_bam_device,
	.sps_deregister_bam_device_ptr = &sps_deregister_bam_device,
	.sps_alloc_endpoint_ptr = &sps_alloc_endpoint,
	.sps_free_endpoint_ptr = &sps_free_endpoint,
	.sps_set_config_ptr = &sps_set_config,
	.sps_get_config_ptr = &sps_get_config,
	.sps_device_reset_ptr = &sps_device_reset,
	.sps_register_event_ptr = &sps_register_event,
	.sps_transfer_one_ptr = &sps_transfer_one,
	.sps_get_iovec_ptr = &sps_get_iovec,
	.sps_get_unused_desc_num_ptr = &sps_get_unused_desc_num,

	.dma_to = DMA_TO_DEVICE,
	.dma_from = DMA_FROM_DEVICE,
};
static struct bam_ops_if *bam_ops = &bam_default_ops;

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do { \
		if (msm_bam_dmux_debug_enable) \
			pr_debug(x); \
	} while (0)

#define DBG_INC_READ_CNT(x) do { \
		bam_dmux_read_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total read bytes %u\n", \
				 __func__, bam_dmux_read_cnt); \
	} while (0)

#define DBG_INC_WRITE_CNT(x) do { \
		bam_dmux_write_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do { \
		bam_dmux_write_cpy_bytes += (x); \
		bam_dmux_write_cpy_cnt++; \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt, \
				 bam_dmux_write_cpy_bytes); \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
	bam_dmux_tx_sps_failure_cnt++; \
} while (0)

#define DBG_INC_TX_STALL_CNT() do { \
	bam_dmux_tx_stall_cnt++; \
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_DEFAULT_DESCRIPTORS 32
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define NUM_BUFFERS 32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;
static unsigned long long last_rx_pkt_timestamp;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
#define SHUTDOWN_TIMEOUT_MS 500
#define UL_WAKEUP_TIMEOUT_MS 2000
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;
static int in_ssr;
static int ssr_skipped_disconnect;
static struct completion shutdown_completion;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

struct kfifo bam_dmux_state_log;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void *bam_ipc_log_txt;

#define BAM_IPC_LOG_PAGES 5

/**
 * Log a state change along with a small message.
 * Complete size of message is limited to @todo.
 * Logging is done using IPC Logging infrastructure.
 *
 * States
 * D: 1 = Power collapse disabled
 * R: 1 = in global reset
 * P: 1 = BAM is powered up
 * A: 1 = BAM initialized and ready for data
 * V: 1 = Uplink vote for power
 * U: 1 = Uplink active
 * W: 1 = Uplink Wait-for-ack
 * A: 1 = Uplink ACK received
 * #: >=1 On-demand uplink vote
 * D: 1 = Disconnect ACK active
 */

#define BAM_DMUX_LOG(fmt, args...) \
do { \
	if (bam_ipc_log_txt) { \
		ipc_log_string(bam_ipc_log_txt, \
			"<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
			a2_pc_disabled ? 'D' : 'd', \
			in_global_reset ? 'R' : 'r', \
			bam_dmux_power_state ? 'P' : 'p', \
			bam_connection_is_active ? 'A' : 'a', \
			bam_dmux_uplink_vote ? 'V' : 'v', \
			bam_is_connected ? 'U' : 'u', \
			wait_for_ack ? 'W' : 'w', \
			ul_wakeup_ack_completion.done ? 'A' : 'a', \
			atomic_read(&ul_ondemand_vote), \
			disconnect_ack ? 'D' : 'd', \
			args); \
	} \
} while (0)

#define DMUX_LOG_KERR(fmt, args...) \
do { \
	BAM_DMUX_LOG(fmt, args); \
	pr_err(fmt, args); \
} while (0)

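/**
 * set_tx_timestamp() - stamp a TX packet with the current sched_clock() time
 * @pkt: packet to stamp
 *
 * Splits the raw nanosecond value into the ts_sec/ts_nsec pair printed by
 * the TX stall debug logging.
 */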
static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

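/**
 * verify_tx_queue_is_empty() - warn if the TX pool still holds packets
 * @func: name of the caller, for the log messages
 *
 * Walks bam_tx_pool under bam_tx_pool_spinlock and logs every packet still
 * in flight; used where the queue is expected to have drained.
 */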
static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			BAM_DMUX_LOG("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

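/**
 * queue_rx() - replenish the RX descriptor pool up to NUM_BUFFERS entries
 *
 * For each buffer: allocate an rx_pkt_info and skb, DMA-map the data, link
 * it into bam_rx_pool, and submit the descriptor to the RX pipe. If
 * allocation fails while the pool is completely empty, a delayed retry is
 * scheduled via queue_rx_work.
 */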
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (bam_connection_is_active && rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
						GFP_NOWAIT | __GFP_NOWARN);
		if (!info) {
			DMUX_LOG_KERR(
				"%s: unable to alloc rx_pkt_info, will retry later\n",
				__func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
						GFP_NOWAIT | __GFP_NOWARN);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
				__func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							bam_ops->dma_from);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = bam_ops->sps_transfer_one_ptr(bam_rx_pipe,
			info->dma_address, BUFFER_SIZE, info, 0);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						bam_ops->dma_from);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);
	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
	}
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
}

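/**
 * bam_mux_process_data() - deliver a received data packet to its client
 * @rx_skb: skb containing a DMUX header followed by payload
 *
 * Strips the mux header, trims the skb to the payload length, and invokes
 * the channel's notify callback with BAM_DMUX_RECEIVE (or frees the skb if
 * no client is registered), then requeues an RX buffer.
 */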
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

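/**
 * handle_bam_mux_cmd_open() - process a remote OPEN command for a channel
 * @rx_hdr: header of the received OPEN packet
 *
 * Marks the channel remote-open and registers its platform device so the
 * client driver can probe. Aborts quietly if an SSR is in progress.
 */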
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
			__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

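/**
 * handle_bam_mux_cmd() - demux one received packet
 * @work: work member of the rx_pkt_info that completed
 *
 * Unmaps the buffer, validates the mux header, then dispatches on the
 * command: DATA to the client, OPEN/OPEN_NO_A2_PC to channel setup, CLOSE
 * to channel teardown. Invalid packets are logged and dropped.
 */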
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
			bam_ops->dma_from);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

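/**
 * bam_mux_write_cmd() - queue a command packet on the TX pipe
 * @data: command buffer (a bam_mux_hdr allocated by the caller)
 * @len: buffer length in bytes
 *
 * DMA-maps the buffer, links a tx_pkt_info into bam_tx_pool, and submits
 * the descriptor. On failure the packet is unlinked, unmapped, and freed;
 * on success the buffer is freed later by bam_mux_write_done().
 */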
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					bam_ops->dma_to);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

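/**
 * bam_mux_write_done() - TX completion handler
 * @work: work member of the completed tx_pkt_info
 *
 * Verifies completions arrive in bam_tx_pool order (BUG on mismatch),
 * removes the packet, and either frees a command packet or delivers
 * BAM_DMUX_WRITE_DONE to the owning channel's notify callback.
 */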
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);
		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

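/**
 * msm_bam_dmux_write() - send an skb on an open DMUX channel
 * @id: logical channel id
 * @skb: packet to send; ownership passes to the driver on success
 *
 * Enforces the per-channel HIGH_WATERMARK, wakes the uplink if needed,
 * prepends a bam_mux_hdr (padding the payload to a 4-byte multiple, copying
 * into a larger skb if there is no tailroom), and queues the transfer.
 *
 * Return: 0 on success or a negative errno.
 */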
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, bam_ops->dma_to);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	skb_pull(skb, sizeof(struct bam_mux_hdr));
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

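/**
 * msm_bam_dmux_open() - open a DMUX channel from the apps side
 * @id: logical channel id
 * @priv: opaque client context passed back through @notify
 * @notify: client event callback (receive, write done, connect, ...)
 *
 * The channel must already be remote-open. Marks the channel locally open,
 * wakes the uplink if needed, and sends an OPEN command to the remote side.
 *
 * A minimal client sketch (my_notify, my_handle_skb, MY_CH_ID, and ctx are
 * hypothetical names, not part of this driver):
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_RECEIVE)
 *			my_handle_skb((struct sk_buff *)data);
 *	}
 *	...
 *	rc = msm_bam_dmux_open(MY_CH_ID, ctx, my_notify);
 */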
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1)) {
			kfree(hdr);
			return -EFAULT;
		}
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

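/**
 * msm_bam_dmux_close() - close a locally open DMUX channel
 * @id: logical channel id
 *
 * Clears the local-open state and sends a CLOSE command to the remote
 * side; if the channel is in reset, the reset flag is cleared and no
 * command is sent.
 *
 * Return: 0 on success or a negative errno.
 */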
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

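/**
 * msm_bam_dmux_is_ch_full() - flow-control query for a channel
 * @id: logical channel id
 *
 * Also enables watermark accounting (use_wm) for the channel.
 *
 * Return: nonzero if num_tx_pkts has reached HIGH_WATERMARK, -ENODEV if
 * the channel is not locally open, -EINVAL for a bad id.
 */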
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

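/**
 * msm_bam_dmux_is_ch_low() - check whether a channel has drained
 * @id: logical channel id
 *
 * Counterpart of msm_bam_dmux_is_ch_full(): nonzero once num_tx_pkts has
 * fallen to LOW_WATERMARK or below, signalling that a throttled client
 * may resume writing.
 */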
int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

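/**
 * rx_switch_to_interrupt_mode() - leave RX polling and re-arm the EOT
 * interrupt, then drain any descriptors that completed while interrupts
 * were still off. On failure the pipe stays in polling mode and the poll
 * work is requeued.
 */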
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = bam_ops->sps_set_config_ptr(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	complete_all(&shutdown_completion);
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

/**
 * store_rx_timestamp() - store the current raw time as a timestamp for when
 * the last rx packet was processed
 */
static void store_rx_timestamp(void)
{
	last_rx_pkt_timestamp = sched_clock();
}

/**
 * log_rx_timestamp() - Log the stored rx pkt timestamp in a human readable
 * format
 */
static void log_rx_timestamp(void)
{
	unsigned long long t = last_rx_pkt_timestamp;
	unsigned long nanosec_rem;

	nanosec_rem = do_div(t, 1000000000U);
	BAM_DMUX_LOG("Last rx pkt processed at [%6u.%09lu]\n", (unsigned)t,
			nanosec_rem);
}

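/**
 * rx_timer_work_func() - RX polling loop
 * @work: rx_timer_work
 *
 * Repeatedly drains completed RX descriptors. After POLLING_INACTIVITY
 * empty passes it switches back to interrupt mode. Between passes it
 * sleeps either a fixed POLLING_MIN_SLEEP..POLLING_MAX_SLEEP range or,
 * with the adaptive timer enabled, an interval rescaled by how many of
 * the NUM_BUFFERS descriptors were consumed during the previous sleep.
 */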
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	u32 buffs_unused, buffs_used;

	BAM_DMUX_LOG("%s: polling start\n", __func__);
	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset) {
				BAM_DMUX_LOG(
					"%s: polling exit, global reset detected\n",
					__func__);
				return;
			}

			ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
			if (ret) {
				DMUX_LOG_KERR("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			store_rx_timestamp();
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles >= POLLING_INACTIVITY) {
			BAM_DMUX_LOG("%s: polling exit, no data\n", __func__);
			rx_switch_to_interrupt_mode();
			break;
		}

		if (bam_adaptive_timer_enabled) {
			usleep_range(rx_timer_interval,
					rx_timer_interval + 50);

			ret = bam_ops->sps_get_unused_desc_num_ptr(bam_rx_pipe,
						&buffs_unused);

			if (ret) {
				DMUX_LOG_KERR(
					"%s: error getting num buffers unused after sleep\n",
					__func__);

				break;
			}

			buffs_used = NUM_BUFFERS - buffs_unused;

			if (buffs_unused == 0) {
				rx_timer_interval = MIN_POLLING_SLEEP;
			} else {
				if (buffs_used > 0) {
					rx_timer_interval =
						(2 * NUM_BUFFERS *
							rx_timer_interval)/
						(3 * buffs_used);
				} else {
					rx_timer_interval =
						MAX_POLLING_SLEEP;
				}
			}

			if (rx_timer_interval > MAX_POLLING_SLEEP)
				rx_timer_interval = MAX_POLLING_SLEEP;
			else if (rx_timer_interval < MIN_POLLING_SLEEP)
				rx_timer_interval = MIN_POLLING_SLEEP;
		} else {
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		}
	}
}

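/**
 * bam_mux_tx_notify() - SPS event callback for the TX pipe
 * @notify: SPS event descriptor
 *
 * On EOT, unmaps the finished packet's buffer and queues its completion
 * work (bam_mux_write_done) on the TX workqueue.
 */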
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						bam_ops->dma_to);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						bam_ops->dma_to);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

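/**
 * bam_mux_rx_notify() - SPS event callback for the RX pipe
 * @notify: SPS event descriptor
 *
 * On the first EOT, disables pipe interrupts, grabs the wakelock, and
 * kicks off rx_timer_work so further packets are drained by polling on
 * core 0.
 */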
Jeff Hugo33dbc002011-08-25 15:52:53 -06001269static void bam_mux_rx_notify(struct sps_event_notify *notify)
1270{
Jeff Hugo949080a2011-08-30 11:58:56 -06001271 int ret;
1272 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -06001273
1274 DBG("%s: event %d notified\n", __func__, notify->event_id);
1275
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001276 if (in_global_reset)
1277 return;
1278
Jeff Hugo33dbc002011-08-25 15:52:53 -06001279 switch (notify->event_id) {
1280 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -06001281 /* attempt to disable interrupts in this pipe */
1282 if (!polling_mode) {
Brent Hronik89c96ba2013-08-27 14:34:22 -06001283 ret = bam_ops->sps_get_config_ptr(bam_rx_pipe,
1284 &cur_rx_conn);
Jeff Hugo949080a2011-08-30 11:58:56 -06001285 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001286 pr_err("%s: sps_get_config() failed %d, interrupts not disabled\n",
1287 __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001288 break;
1289 }
Jeff Hugoa9d32ba2011-11-21 14:59:48 -07001290 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
Jeff Hugo949080a2011-08-30 11:58:56 -06001291 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06001292 ret = bam_ops->sps_set_config_ptr(bam_rx_pipe,
1293 &cur_rx_conn);
Jeff Hugo949080a2011-08-30 11:58:56 -06001294 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001295 pr_err("%s: sps_set_config() failed %d, interrupts not disabled\n",
1296 __func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001297 break;
1298 }
Brent Hronik096f7d32013-06-28 15:43:08 -06001299 INIT_COMPLETION(shutdown_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07001300 grab_wakelock();
Jeff Hugo949080a2011-08-30 11:58:56 -06001301 polling_mode = 1;
Jeff Hugofff43af92012-03-29 17:54:52 -06001302 /*
1303 * run on core 0 so that netif_rx() in rmnet uses only
1304 * one queue
1305 */
1306 queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
Jeff Hugo949080a2011-08-30 11:58:56 -06001307 }
Jeff Hugo33dbc002011-08-25 15:52:53 -06001308 break;
1309 default:
1310 pr_err("%s: recieved unexpected event id %d\n", __func__,
1311 notify->event_id);
1312 }
1313}
1314
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315#ifdef CONFIG_DEBUG_FS
1316
1317static int debug_tbl(char *buf, int max)
1318{
1319 int i = 0;
1320 int j;
1321
1322 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1323 i += scnprintf(buf + i, max - i,
1324 "ch%02d local open=%s remote open=%s\n",
1325 j, bam_ch_is_local_open(j) ? "Y" : "N",
1326 bam_ch_is_remote_open(j) ? "Y" : "N");
1327 }
1328
1329 return i;
1330}
1331
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001332static int debug_ul_pkt_cnt(char *buf, int max)
1333{
1334 struct list_head *p;
1335 unsigned long flags;
1336 int n = 0;
1337
1338 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1339 __list_for_each(p, &bam_tx_pool) {
1340 ++n;
1341 }
1342 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1343
1344 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1345}
1346
1347static int debug_stats(char *buf, int max)
1348{
1349 int i = 0;
1350
1351 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001352 "skb read cnt: %u\n"
1353 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001354 "skb copy cnt: %u\n"
1355 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001356 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001357 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001358 "rx queue len: %d\n"
1359 "a2 ack out cnt: %d\n"
1360 "a2 ack in cnt: %d\n"
1361 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001362 bam_dmux_read_cnt,
1363 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001364 bam_dmux_write_cpy_cnt,
1365 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001366 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001367 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001368 bam_rx_pool_len,
1369 atomic_read(&bam_dmux_ack_out_cnt),
1370 atomic_read(&bam_dmux_ack_in_cnt),
1371 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001372 );
1373
1374 return i;
1375}
1376
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001377#define DEBUG_BUFMAX 4096
1378static char debug_buffer[DEBUG_BUFMAX];
1379
1380static ssize_t debug_read(struct file *file, char __user *buf,
1381 size_t count, loff_t *ppos)
1382{
1383 int (*fill)(char *buf, int max) = file->private_data;
1384 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1385 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1386}
1387
1388static int debug_open(struct inode *inode, struct file *file)
1389{
1390 file->private_data = inode->i_private;
1391 return 0;
1392}
1393
1394
1395static const struct file_operations debug_ops = {
1396 .read = debug_read,
1397 .open = debug_open,
1398};
1399
1400static void debug_create(const char *name, mode_t mode,
1401 struct dentry *dent,
1402 int (*fill)(char *buf, int max))
1403{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001404 struct dentry *file;
1405
1406 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1407 if (IS_ERR(file))
1408 pr_err("%s: debugfs create failed %d\n", __func__,
1409 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001410}
1411
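/*
 * Illustrative sketch, not part of the driver: one more stat file using
 * the same fill-function pattern. debug_read() renders at most
 * DEBUG_BUFMAX bytes through the fill callback stashed in i_private, so a
 * new file needs only a fill function plus a debug_create() call in
 * bam_dmux_init(); the name below is hypothetical.
 */
static int __maybe_unused example_debug_rx_pool(char *buf, int max)
{
	return scnprintf(buf, max, "rx pool len: %d\n", bam_rx_pool_len);
}
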
1412#endif
1413
Jeff Hugod98b1082011-10-24 10:30:23 -06001414static void notify_all(int event, unsigned long data)
1415{
1416 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001417 struct list_head *temp;
1418 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001419
Jeff Hugoac8152a2013-04-19 11:05:19 -06001420 BAM_DMUX_LOG("%s: event=%d, data=%lu\n", __func__, event, data);
1421
Jeff Hugod98b1082011-10-24 10:30:23 -06001422 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Jeff Hugoac8152a2013-04-19 11:05:19 -06001423 if (bam_ch_is_open(i))
Jeff Hugod98b1082011-10-24 10:30:23 -06001424 bam_ch[i].notify(bam_ch[i].priv, event, data);
1425 }
Jeff Hugocb798022012-04-09 14:55:40 -06001426
1427 __list_for_each(temp, &bam_other_notify_funcs) {
1428 func = container_of(temp, struct outside_notify_func,
1429 list_node);
1430 func->notify(func->priv, event, data);
1431 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001432}
1433
1434static void kickoff_ul_wakeup_func(struct work_struct *work)
1435{
1436 read_lock(&ul_wakeup_lock);
1437 if (!bam_is_connected) {
1438 read_unlock(&ul_wakeup_lock);
1439 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001440 if (unlikely(in_global_reset == 1))
1441 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001442 read_lock(&ul_wakeup_lock);
1443 ul_packet_written = 1;
1444 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1445 }
1446 read_unlock(&ul_wakeup_lock);
1447}
1448
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001449int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001450{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001451 int is_connected;
1452
1453 read_lock(&ul_wakeup_lock);
1454 ul_packet_written = 1;
1455 is_connected = bam_is_connected;
1456 if (!is_connected)
1457 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1458 read_unlock(&ul_wakeup_lock);
1459
1460 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001461}
1462
Eric Holmberg878923a2012-01-10 14:28:19 -07001463static void power_vote(int vote)
1464{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301465 BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07001466 bam_dmux_uplink_vote, vote);
1467
1468 if (bam_dmux_uplink_vote == vote)
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301469 BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07001470
1471 bam_dmux_uplink_vote = vote;
1472 if (vote)
Brent Hronik89c96ba2013-08-27 14:34:22 -06001473 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
1474 0, SMSM_A2_POWER_CONTROL);
Eric Holmberg878923a2012-01-10 14:28:19 -07001475 else
Brent Hronik89c96ba2013-08-27 14:34:22 -06001476 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
1477 SMSM_A2_POWER_CONTROL, 0);
Eric Holmberg878923a2012-01-10 14:28:19 -07001478}
1479
Eric Holmberg454d9da2012-01-12 09:37:14 -07001480/*
1481 * @note: Must be called with ul_wakeup_lock locked.
1482 */
1483static inline void ul_powerdown(void)
1484{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301485 BAM_DMUX_LOG("%s: powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001486 verify_tx_queue_is_empty(__func__);
1487
1488 if (a2_pc_disabled) {
1489 wait_for_dfab = 1;
1490 INIT_COMPLETION(dfab_unvote_completion);
1491 release_wakelock();
1492 } else {
1493 wait_for_ack = 1;
1494 INIT_COMPLETION(ul_wakeup_ack_completion);
1495 power_vote(0);
1496 }
1497 bam_is_connected = 0;
1498 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1499}
1500
1501static inline void ul_powerdown_finish(void)
1502{
1503 if (a2_pc_disabled && wait_for_dfab) {
1504 unvote_dfab();
1505 complete_all(&dfab_unvote_completion);
1506 wait_for_dfab = 0;
1507 }
1508}
1509
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001510/*
1511 * Votes for UL power and returns current power state.
1512 *
1513 * @returns true if currently connected
1514 */
1515int msm_bam_dmux_ul_power_vote(void)
1516{
1517 int is_connected;
1518
1519 read_lock(&ul_wakeup_lock);
1520 atomic_inc(&ul_ondemand_vote);
1521 is_connected = bam_is_connected;
1522 if (!is_connected)
1523 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1524 read_unlock(&ul_wakeup_lock);
1525
1526 return is_connected;
1527}
1528
1529/*
1530 * Unvotes for UL power.
1531 *
1532 * @returns true if vote count is 0 (UL shutdown possible)
1533 */
1534int msm_bam_dmux_ul_power_unvote(void)
1535{
1536 int vote;
1537
1538 read_lock(&ul_wakeup_lock);
1539 vote = atomic_dec_return(&ul_ondemand_vote);
1540 if (unlikely(vote < 0))
1541 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1542 read_unlock(&ul_wakeup_lock);
1543
1544 return vote == 0;
1545}
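
/*
 * Illustrative sketch, not part of the driver: a hypothetical client
 * bracketing a burst of uplink traffic with an on-demand power vote. A
 * false return from the vote means the wakeup has only been kicked off,
 * so the client must wait for BAM_DMUX_UL_CONNECTED (delivered through
 * its notify callback) before writing.
 */
static void __maybe_unused example_ul_burst(void)
{
	if (!msm_bam_dmux_ul_power_vote()) {
		/* wakeup in progress; wait for BAM_DMUX_UL_CONNECTED */
	}

	/* ... perform uplink writes while the vote is held ... */

	if (msm_bam_dmux_ul_power_unvote())
		pr_debug("%s: last vote dropped, UL may power down\n",
				__func__);
}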
1546
Jeff Hugocb798022012-04-09 14:55:40 -06001547int msm_bam_dmux_reg_notify(void *priv,
1548 void (*notify)(void *priv, int event_type,
1549 unsigned long data))
1550{
1551 struct outside_notify_func *func;
1552
1553 if (!notify)
1554 return -EINVAL;
1555
1556 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1557 if (!func)
1558 return -ENOMEM;
1559
1560 func->notify = notify;
1561 func->priv = priv;
1562 list_add(&func->list_node, &bam_other_notify_funcs);
1563
1564 return 0;
1565}
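
/*
 * Illustrative sketch, not part of the driver: registering for the global
 * events fanned out by notify_all() without owning a channel. The
 * callback and registration wrapper below are hypothetical.
 */
static void example_power_event(void *priv, int event, unsigned long data)
{
	if (event == BAM_DMUX_UL_CONNECTED)
		pr_debug("%s: uplink connected\n", __func__);
	else if (event == BAM_DMUX_UL_DISCONNECTED)
		pr_debug("%s: uplink disconnected\n", __func__);
}

static int __maybe_unused example_register_power_event(void)
{
	return msm_bam_dmux_reg_notify(NULL, example_power_event);
}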
1566
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001567static void ul_timeout(struct work_struct *work)
1568{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001569 unsigned long flags;
1570 int ret;
1571
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001572 if (in_global_reset)
1573 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001574 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1575 if (!ret) { /* failed to grab lock, reschedule and bail */
1576 schedule_delayed_work(&ul_timeout_work,
1577 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1578 return;
1579 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001580 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001581 if (!ul_packet_written) {
1582 spin_lock(&bam_tx_pool_spinlock);
1583 if (!list_empty(&bam_tx_pool)) {
1584 struct tx_pkt_info *info;
1585
1586 info = list_first_entry(&bam_tx_pool,
1587 struct tx_pkt_info, list_node);
1588 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1589 __func__, info->ts_sec, info->ts_nsec);
1590 DBG_INC_TX_STALL_CNT();
1591 ul_packet_written = 1;
1592 }
1593 spin_unlock(&bam_tx_pool_spinlock);
1594 }
1595
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001596 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301597 BAM_DMUX_LOG("%s: pkt written %d\n",
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001598 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001599 ul_packet_written = 0;
1600 schedule_delayed_work(&ul_timeout_work,
1601 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001602 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001603 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001604 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001605 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001606 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001607 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001608}
Jeff Hugo4838f412012-01-20 11:19:37 -07001609
1610static int ssrestart_check(void)
1611{
Jeff Hugob8156d72013-06-04 12:51:10 -06001612 int ret = 0;
1613
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001614 if (in_global_reset) {
1615 DMUX_LOG_KERR("%s: modem timeout: already in SSR\n",
1616 __func__);
1617 return 1;
1618 }
1619
Jeff Hugob8156d72013-06-04 12:51:10 -06001620 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled for SSR\n",
1621 __func__);
Eric Holmberg90285e22012-02-22 12:33:05 -07001622 in_global_reset = 1;
Jeff Hugob8156d72013-06-04 12:51:10 -06001623 ret = subsystem_restart("modem");
1624 if (ret == -ENODEV)
1625 panic("modem subsystem restart failed\n");
Eric Holmberg90285e22012-02-22 12:33:05 -07001626 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001627}
1628
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001629static void ul_wakeup(void)
1630{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001631 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001632 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001633
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001634 mutex_lock(&wakeup_lock);
1635 if (bam_is_connected) { /* bam got connected before lock grabbed */
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301636 BAM_DMUX_LOG("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001637 mutex_unlock(&wakeup_lock);
1638 return;
1639 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001640
Jeff Hugoc2696142012-05-03 11:42:13 -06001641 /*
Jeff Hugof5001732012-08-27 13:19:09 -06001642 * if this gets hit, that means restart_notifier_cb() has started
1643 * but probably not finished, thus we know SSR has happened, but
1644 * haven't been able to send that info to our clients yet.
1645 * in that case, abort the ul_wakeup() so that we don't undo any
1646 * work restart_notifier_cb() has done. The clients will be notified
1647 * shortly. No cleanup necessary (reschedule the wakeup) as our and
1648 * their SSR handling will cover it
1649 */
1650 if (unlikely(in_global_reset == 1)) {
1651 mutex_unlock(&wakeup_lock);
1652 return;
1653 }
1654
1655 /*
Jeff Hugoc2696142012-05-03 11:42:13 -06001656 * if someone is voting for UL before bam is inited (modem up first
1657 * time), set flag for init to kickoff ul wakeup once bam is inited
1658 */
1659 mutex_lock(&delayed_ul_vote_lock);
1660 if (unlikely(!bam_mux_initialized)) {
1661 need_delayed_ul_vote = 1;
1662 mutex_unlock(&delayed_ul_vote_lock);
1663 mutex_unlock(&wakeup_lock);
1664 return;
1665 }
1666 mutex_unlock(&delayed_ul_vote_lock);
1667
Eric Holmberg006057d2012-01-11 10:10:42 -07001668 if (a2_pc_disabled) {
1669 /*
1670 * don't grab the wakelock the first time because it is
1671 * already grabbed when a2 powers on
1672 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001673 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001674 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001675 do_vote_dfab = 1; /* vote must occur after wait */
1676 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001677 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001678 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001679 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001680 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001681 &dfab_unvote_completion, HZ);
1682 BUG_ON(ret == 0);
1683 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001684 if (likely(do_vote_dfab))
1685 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001686 schedule_delayed_work(&ul_timeout_work,
1687 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1688 bam_is_connected = 1;
1689 mutex_unlock(&wakeup_lock);
1690 return;
1691 }
1692
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001693 /*
1694 * must wait for the previous power down request to have been acked
1695 * chances are it already came in and this will just fall through
1696 * instead of waiting
1697 */
1698 if (wait_for_ack) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301699 BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001700 ret = wait_for_completion_timeout(
Jeff Hugo1f317392013-07-24 16:28:52 -06001701 &ul_wakeup_ack_completion,
1702 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Eric Holmberg006057d2012-01-11 10:10:42 -07001703 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001704 if (unlikely(ret == 0) && ssrestart_check()) {
1705 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301706 BAM_DMUX_LOG("%s timeout previous ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001707 return;
1708 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001709 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001710 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001711 power_vote(1);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301712 BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
Jeff Hugo1f317392013-07-24 16:28:52 -06001713 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion,
1714 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Jeff Hugo4838f412012-01-20 11:19:37 -07001715 if (unlikely(ret == 0) && ssrestart_check()) {
1716 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301717 BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001718 return;
1719 }
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301720 BAM_DMUX_LOG("%s waiting completion\n", __func__);
Jeff Hugo1f317392013-07-24 16:28:52 -06001721 ret = wait_for_completion_timeout(&bam_connection_completion,
1722 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Jeff Hugo4838f412012-01-20 11:19:37 -07001723 if (unlikely(ret == 0) && ssrestart_check()) {
1724 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301725 BAM_DMUX_LOG("%s timeout power on\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001726 return;
1727 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001728
1729 bam_is_connected = 1;
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301730 BAM_DMUX_LOG("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001731 schedule_delayed_work(&ul_timeout_work,
1732 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1733 mutex_unlock(&wakeup_lock);
1734}
1735
1736static void reconnect_to_bam(void)
1737{
1738 int i;
1739
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001740 in_global_reset = 0;
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001741 in_ssr = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001742 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001743 if (!power_management_only_mode) {
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001744 if (ssr_skipped_disconnect) {
1745 /* delayed to here to prevent bus stall */
Brent Hronik89c96ba2013-08-27 14:34:22 -06001746 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
1747 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001748 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1749 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1750 }
1751 ssr_skipped_disconnect = 0;
Brent Hronik89c96ba2013-08-27 14:34:22 -06001752 i = bam_ops->sps_device_reset_ptr(a2_device_handle);
Jeff Hugo18792a32012-06-20 15:25:55 -06001753 if (i)
1754 pr_err("%s: device reset failed rc = %d\n", __func__,
1755 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001756 i = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
Jeff Hugo18792a32012-06-20 15:25:55 -06001757 if (i)
1758 pr_err("%s: tx connection failed rc = %d\n", __func__,
1759 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001760 i = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
Jeff Hugo18792a32012-06-20 15:25:55 -06001761 if (i)
1762 pr_err("%s: rx connection failed rc = %d\n", __func__,
1763 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001764 i = bam_ops->sps_register_event_ptr(bam_tx_pipe,
1765 &tx_register_event);
Jeff Hugo18792a32012-06-20 15:25:55 -06001766 if (i)
1767 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1768 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001769 i = bam_ops->sps_register_event_ptr(bam_rx_pipe,
1770 &rx_register_event);
Jeff Hugo18792a32012-06-20 15:25:55 -06001771 if (i)
1772 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1773 i);
1774 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001775
1776 bam_connection_is_active = 1;
1777
1778 if (polling_mode)
1779 rx_switch_to_interrupt_mode();
1780
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001781 toggle_apps_ack();
1782 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001783 if (!power_management_only_mode)
1784 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001785}
1786
1787static void disconnect_to_bam(void)
1788{
1789 struct list_head *node;
1790 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001791 unsigned long flags;
Brent Hronik096f7d32013-06-28 15:43:08 -06001792 unsigned long time_remaining;
1793
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001794 if (!in_global_reset) {
1795 time_remaining = wait_for_completion_timeout(
1796 &shutdown_completion,
1797 msecs_to_jiffies(SHUTDOWN_TIMEOUT_MS));
1798 if (time_remaining == 0) {
1799 DMUX_LOG_KERR("%s: shutdown completion timed out\n",
1800 __func__);
Jeff Hugo7c185602013-09-11 17:39:54 -06001801 log_rx_timestamp();
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001802 ssrestart_check();
1803 }
Brent Hronik096f7d32013-06-28 15:43:08 -06001804 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001805
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001806 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001807
1808 /* handle disconnect during active UL */
1809 write_lock_irqsave(&ul_wakeup_lock, flags);
1810 if (bam_is_connected) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301811 BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001812 ul_powerdown();
1813 }
1814 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1815 ul_powerdown_finish();
1816
1817 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001818 INIT_COMPLETION(bam_connection_completion);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001819
1820 /* in_ssr documentation/assumptions found in restart_notifier_cb */
Jeff Hugo18792a32012-06-20 15:25:55 -06001821 if (!power_management_only_mode) {
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001822 if (likely(!in_ssr)) {
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001823 BAM_DMUX_LOG("%s: disconnect tx\n", __func__);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001824 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001825 BAM_DMUX_LOG("%s: disconnect rx\n", __func__);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001826 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001827 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1828 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001829 BAM_DMUX_LOG("%s: device reset\n", __func__);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001830 bam_ops->sps_device_reset_ptr(a2_device_handle);
1831 } else {
1832 ssr_skipped_disconnect = 1;
1833 }
Jeff Hugo18792a32012-06-20 15:25:55 -06001834 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001835 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001836
1837 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001838 while (!list_empty(&bam_rx_pool)) {
1839 node = bam_rx_pool.next;
1840 list_del(node);
1841 info = container_of(node, struct rx_pkt_info, list_node);
1842 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
Brent Hronik89c96ba2013-08-27 14:34:22 -06001843 bam_ops->dma_from);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001844 dev_kfree_skb_any(info->skb);
1845 kfree(info);
1846 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001847 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001848 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001849
Jeff Hugo0b13a352012-03-17 23:18:30 -06001850 if (disconnect_ack)
1851 toggle_apps_ack();
1852
Eric Holmberg878923a2012-01-10 14:28:19 -07001853 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001854}
1855
1856static void vote_dfab(void)
1857{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001858 int rc;
1859
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301860 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001861 mutex_lock(&dfab_status_lock);
1862 if (dfab_is_on) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301863 BAM_DMUX_LOG("%s: dfab is already on\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001864 mutex_unlock(&dfab_status_lock);
1865 return;
1866 }
Jeff Hugod0befde2012-08-09 15:32:49 -06001867 if (dfab_clk) {
1868 rc = clk_prepare_enable(dfab_clk);
1869 if (rc)
1870 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n",
1871 rc);
1872 }
1873 if (xo_clk) {
1874 rc = clk_prepare_enable(xo_clk);
1875 if (rc)
1876 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n",
1877 rc);
1878 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001879 dfab_is_on = 1;
1880 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001881}
1882
1883static void unvote_dfab(void)
1884{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301885 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001886 mutex_lock(&dfab_status_lock);
1887 if (!dfab_is_on) {
1888 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1889 dump_stack();
1890 mutex_unlock(&dfab_status_lock);
1891 return;
1892 }
Jeff Hugod0befde2012-08-09 15:32:49 -06001893 if (dfab_clk)
1894 clk_disable_unprepare(dfab_clk);
1895 if (xo_clk)
1896 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001897 dfab_is_on = 0;
1898 mutex_unlock(&dfab_status_lock);
1899}
1900
1901/* reference counting wrapper around wakelock */
1902static void grab_wakelock(void)
1903{
1904 unsigned long flags;
1905
1906 spin_lock_irqsave(&wakelock_reference_lock, flags);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301907 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001908 wakelock_reference_count);
1909 if (wakelock_reference_count == 0)
1910 wake_lock(&bam_wakelock);
1911 ++wakelock_reference_count;
1912 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1913}
1914
1915static void release_wakelock(void)
1916{
1917 unsigned long flags;
1918
1919 spin_lock_irqsave(&wakelock_reference_lock, flags);
1920 if (wakelock_reference_count == 0) {
1921 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1922 dump_stack();
1923 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1924 return;
1925 }
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301926 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001927 wakelock_reference_count);
1928 --wakelock_reference_count;
1929 if (wakelock_reference_count == 0)
1930 wake_unlock(&bam_wakelock);
1931 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001932}
1933
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001934static int restart_notifier_cb(struct notifier_block *this,
1935 unsigned long code,
1936 void *data)
1937{
1938 int i;
1939 struct list_head *node;
1940 struct tx_pkt_info *info;
1941 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001942 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001943
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001944 /*
1945 * Bam_dmux counts on the fact that the BEFORE_SHUTDOWN level of
1946 * notifications are guaranteed to execute before the AFTER_SHUTDOWN
1947 * level of notifications, and that BEFORE_SHUTDOWN always occurs in
1948 * all SSR events, no matter what triggered the SSR. Also, bam_dmux
1949 * assumes that SMD does its SSR processing in the AFTER_SHUTDOWN level
1950 * thus bam_dmux is guaranteed to detect SSR before SMD, since the
1951 * callbacks for all the drivers within the AFTER_SHUTDOWN level could
1952 * occur in any order. Bam_dmux uses this knowledge to skip accessing
1953 * the bam hardware when disconnect_to_bam() is triggered by SMD's SSR
1954 * processing. We do not want to access the bam hardware during SSR
1955 * because a watchdog crash from a bus stall would likely occur.
1956 */
Jeff Hugo199294b2013-02-25 13:46:56 -07001957 if (code == SUBSYS_BEFORE_SHUTDOWN) {
1958 in_global_reset = 1;
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001959 in_ssr = 1;
Zaheerulla Meerf800bba2013-02-13 15:49:14 +05301960 BAM_DMUX_LOG("%s: begin\n", __func__);
Jeff Hugo199294b2013-02-25 13:46:56 -07001961 flush_workqueue(bam_mux_rx_workqueue);
1962 }
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001963 if (code != SUBSYS_AFTER_SHUTDOWN)
1964 return NOTIFY_DONE;
1965
Eric Holmberg454d9da2012-01-12 09:37:14 -07001966 /* Handle uplink Powerdown */
1967 write_lock_irqsave(&ul_wakeup_lock, flags);
1968 if (bam_is_connected) {
1969 ul_powerdown();
1970 wait_for_ack = 0;
1971 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001972 /*
1973 * if modem crash during ul_wakeup(), power_vote is 1, needs to be
1974 * reset to 0. harmless if bam_is_connected check above passes
1975 */
1976 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001977 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1978 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001979 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001980 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugof62029d2012-07-17 13:39:53 -06001981 disconnect_ack = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001982
1983 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06001984 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001985 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1986 temp_remote_status = bam_ch_is_remote_open(i);
1987 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001988 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001989 if (bam_ch_is_local_open(i))
1990 bam_ch[i].status |= BAM_CH_IN_RESET;
1991 if (temp_remote_status) {
1992 platform_device_unregister(bam_ch[i].pdev);
1993 bam_ch[i].pdev = platform_device_alloc(
1994 bam_ch[i].name, 2);
1995 }
1996 }
Eric Holmberga623da82012-07-12 09:37:09 -06001997 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001998
1999 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07002000 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002001 while (!list_empty(&bam_tx_pool)) {
2002 node = bam_tx_pool.next;
2003 list_del(node);
2004 info = container_of(node, struct tx_pkt_info,
2005 list_node);
2006 if (!info->is_cmd) {
2007 dma_unmap_single(NULL, info->dma_address,
2008 info->skb->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06002009 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002010 dev_kfree_skb_any(info->skb);
2011 } else {
2012 dma_unmap_single(NULL, info->dma_address,
2013 info->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06002014 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002015 kfree(info->skb);
2016 }
2017 kfree(info);
2018 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07002019 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07002020
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302021 BAM_DMUX_LOG("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002022 return NOTIFY_DONE;
2023}
2024
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002025static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002026{
2027 u32 h;
2028 dma_addr_t dma_addr;
2029 int ret;
2030 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002031 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002032
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002033 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002034 /* init BAM */
Jeff Hugo3910ee12012-08-21 14:08:20 -06002035 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2036 a2_phys_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037 if (!a2_virt_addr) {
2038 pr_err("%s: ioremap failed\n", __func__);
2039 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07002040 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002041 }
Jeff Hugo3910ee12012-08-21 14:08:20 -06002042 a2_props.phys_addr = (u32)(a2_phys_base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002044 a2_props.virt_size = a2_phys_size;
2045 a2_props.irq = a2_bam_irq;
Jeff Hugo927cba62011-11-11 11:49:52 -07002046 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 a2_props.num_pipes = A2_NUM_PIPES;
2048 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07002049 if (cpu_is_msm9615())
2050 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002051 /* need to free on tear down */
Brent Hronik89c96ba2013-08-27 14:34:22 -06002052 ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002053 if (ret < 0) {
2054 pr_err("%s: register bam error %d\n", __func__, ret);
2055 goto register_bam_failed;
2056 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002057 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058
Brent Hronik89c96ba2013-08-27 14:34:22 -06002059 bam_tx_pipe = bam_ops->sps_alloc_endpoint_ptr();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002060 if (bam_tx_pipe == NULL) {
2061 pr_err("%s: tx alloc endpoint failed\n", __func__);
2062 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002063 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 }
Brent Hronik89c96ba2013-08-27 14:34:22 -06002065 ret = bam_ops->sps_get_config_ptr(bam_tx_pipe, &tx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066 if (ret) {
2067 pr_err("%s: tx get config failed %d\n", __func__, ret);
2068 goto tx_get_config_failed;
2069 }
2070
2071 tx_connection.source = SPS_DEV_HANDLE_MEM;
2072 tx_connection.src_pipe_index = 0;
2073 tx_connection.destination = h;
2074 tx_connection.dest_pipe_index = 4;
2075 tx_connection.mode = SPS_MODE_DEST;
2076 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
2077 tx_desc_mem_buf.size = 0x800; /* 2k */
2078 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
2079 &dma_addr, 0);
2080 if (tx_desc_mem_buf.base == NULL) {
2081 pr_err("%s: tx memory alloc failed\n", __func__);
2082 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002083 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084 }
2085 tx_desc_mem_buf.phys_base = dma_addr;
2086 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
2087 tx_connection.desc = tx_desc_mem_buf;
2088 tx_connection.event_thresh = 0x10;
2089
Brent Hronik89c96ba2013-08-27 14:34:22 -06002090 ret = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 if (ret < 0) {
2092 pr_err("%s: tx connect error %d\n", __func__, ret);
2093 goto tx_connect_failed;
2094 }
2095
Brent Hronik89c96ba2013-08-27 14:34:22 -06002096 bam_rx_pipe = bam_ops->sps_alloc_endpoint_ptr();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002097 if (bam_rx_pipe == NULL) {
2098 pr_err("%s: rx alloc endpoint failed\n", __func__);
2099 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002100 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002101 }
Brent Hronik89c96ba2013-08-27 14:34:22 -06002102 ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &rx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002103 if (ret) {
2104 pr_err("%s: rx get config failed %d\n", __func__, ret);
2105 goto rx_get_config_failed;
2106 }
2107
2108 rx_connection.source = h;
2109 rx_connection.src_pipe_index = 5;
2110 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2111 rx_connection.dest_pipe_index = 1;
2112 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002113 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2114 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002115 rx_desc_mem_buf.size = 0x800; /* 2k */
2116 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2117 &dma_addr, 0);
2118 if (rx_desc_mem_buf.base == NULL) {
2119 pr_err("%s: rx memory alloc failed\n", __func__);
2120 ret = -ENOMEM;
2121 goto rx_mem_failed;
2122 }
2123 rx_desc_mem_buf.phys_base = dma_addr;
2124 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2125 rx_connection.desc = rx_desc_mem_buf;
2126 rx_connection.event_thresh = 0x10;
2127
Brent Hronik89c96ba2013-08-27 14:34:22 -06002128 ret = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129 if (ret < 0) {
2130 pr_err("%s: rx connect error %d\n", __func__, ret);
2131 goto rx_connect_failed;
2132 }
2133
2134 tx_register_event.options = SPS_O_EOT;
2135 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2136 tx_register_event.xfer_done = NULL;
2137 tx_register_event.callback = bam_mux_tx_notify;
2138 tx_register_event.user = NULL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002139 ret = bam_ops->sps_register_event_ptr(bam_tx_pipe, &tx_register_event);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140 if (ret < 0) {
2141 pr_err("%s: tx register event error %d\n", __func__, ret);
2142 goto rx_event_reg_failed;
2143 }
2144
Jeff Hugo33dbc002011-08-25 15:52:53 -06002145 rx_register_event.options = SPS_O_EOT;
2146 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2147 rx_register_event.xfer_done = NULL;
2148 rx_register_event.callback = bam_mux_rx_notify;
2149 rx_register_event.user = NULL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002150 ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
Jeff Hugo33dbc002011-08-25 15:52:53 -06002151 if (ret < 0) {
2152 pr_err("%s: tx register event error %d\n", __func__, ret);
2153 goto rx_event_reg_failed;
2154 }
2155
Jeff Hugoc2696142012-05-03 11:42:13 -06002156 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002158 if (need_delayed_ul_vote) {
2159 need_delayed_ul_vote = 0;
2160 msm_bam_dmux_kickoff_ul_wakeup();
2161 }
2162 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002163 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002164 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002165 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002166 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002167 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002168
2169rx_event_reg_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002170 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002171rx_connect_failed:
2172 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2173 rx_desc_mem_buf.phys_base);
2174rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175rx_get_config_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002176 bam_ops->sps_free_endpoint_ptr(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002177rx_alloc_endpoint_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002178 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002179tx_connect_failed:
2180 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2181 tx_desc_mem_buf.phys_base);
2182tx_get_config_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002183 bam_ops->sps_free_endpoint_ptr(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002184tx_alloc_endpoint_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002185 bam_ops->sps_deregister_bam_device_ptr(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002186 /*
2187 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2188 * same handle below will cause a crash, so skip it if we've freed
2189 * the handle here.
2190 */
2191 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002192register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002193 if (!skip_iounmap)
2194 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002195ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002196 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002197 return ret;
2198}
2199
2200static int bam_init_fallback(void)
2201{
2202 u32 h;
2203 int ret;
2204 void *a2_virt_addr;
2205
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002206 /* init BAM */
Jeff Hugo3910ee12012-08-21 14:08:20 -06002207 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2208 a2_phys_size);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002209 if (!a2_virt_addr) {
2210 pr_err("%s: ioremap failed\n", __func__);
2211 ret = -ENOMEM;
2212 goto ioremap_failed;
2213 }
Jeff Hugo3910ee12012-08-21 14:08:20 -06002214 a2_props.phys_addr = (u32)(a2_phys_base);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002215 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002216 a2_props.virt_size = a2_phys_size;
2217 a2_props.irq = a2_bam_irq;
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002218 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2219 a2_props.num_pipes = A2_NUM_PIPES;
2220 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2221 if (cpu_is_msm9615())
2222 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002223 ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002224 if (ret < 0) {
2225 pr_err("%s: register bam error %d\n", __func__, ret);
2226 goto register_bam_failed;
2227 }
2228 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002229
2230 mutex_lock(&delayed_ul_vote_lock);
2231 bam_mux_initialized = 1;
2232 if (need_delayed_ul_vote) {
2233 need_delayed_ul_vote = 0;
2234 msm_bam_dmux_kickoff_ul_wakeup();
2235 }
2236 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002237 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002238
Jeff Hugo18792a32012-06-20 15:25:55 -06002239 power_management_only_mode = 1;
2240 bam_connection_is_active = 1;
2241 complete_all(&bam_connection_completion);
2242
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002243 return 0;
2244
2245register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002246 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002247ioremap_failed:
2248 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002249}
Jeff Hugoade1f842011-08-03 15:53:59 -06002250
Jeff Hugoa670b762012-03-15 15:58:28 -06002251static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002252{
2253 int ret = 0;
2254
2255 ret = bam_init();
2256 if (ret) {
2257 ret = bam_init_fallback();
2258 if (ret)
2259 pr_err("%s: bam init fallback failed: %d\n",
2260 __func__, ret);
2261 }
2262}
2263
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002264static void toggle_apps_ack(void)
2265{
2266 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002267
Eric Holmberg7614a7f2013-07-29 15:47:12 -06002268 if (in_global_reset) {
2269 BAM_DMUX_LOG("%s: skipped due to SSR\n", __func__);
2270 return;
2271 }
2272
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302273 BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07002274 clear_bit & 0x1, ~clear_bit & 0x1);
Brent Hronik89c96ba2013-08-27 14:34:22 -06002275 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002276 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2277 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2278 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002279 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002280}
2281
Jeff Hugoade1f842011-08-03 15:53:59 -06002282static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2283{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002284 static int last_processed_state;
2285
2286 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002287 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002288 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302289 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002290 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002291 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302292 BAM_DMUX_LOG("%s: already processed this state\n", __func__);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002293 mutex_unlock(&smsm_cb_lock);
2294 return;
2295 }
2296
2297 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002298
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002299 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302300 BAM_DMUX_LOG("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002301 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002302 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002303 } else if (bam_mux_initialized &&
2304 !(new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302305 BAM_DMUX_LOG("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002306 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002307 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002308 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302309 BAM_DMUX_LOG("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002310 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002311 if (cpu_is_msm9615())
2312 msm9615_bam_init();
2313 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002314 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002315 } else {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302316 BAM_DMUX_LOG("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002317 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002318 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002319 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002320
2321}
2322
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002323static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2324 uint32_t new_state)
2325{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002326 DBG_INC_ACK_IN_CNT();
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302327 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002328 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002329 complete_all(&ul_wakeup_ack_completion);
2330}
2331
Brent Hronik89c96ba2013-08-27 14:34:22 -06002332/**
2333 * msm_bam_dmux_set_bam_ops() - sets the bam_ops
2334 * @ops: bam_ops_if to set
2335 *
2336 * Sets bam_ops to allow switching of runtime behavior. Precondition: bam dmux
2337 * must be in an idle state. If input ops is NULL, then bam_ops will be
2338 * restored to their default state.
2339 */
2340void msm_bam_dmux_set_bam_ops(struct bam_ops_if *ops)
2341{
2342 if (ops != NULL)
2343 bam_ops = ops;
2344 else
2345 bam_ops = &bam_default_ops;
2346}
2347EXPORT_SYMBOL(msm_bam_dmux_set_bam_ops);
2348
2349/**
2350 * msm_bam_dmux_deinit() - puts bam dmux into a deinited state
2351 *
2352 * Puts bam dmux into a deinitialized state by simulating an ssr.
2353 */
2354void msm_bam_dmux_deinit(void)
2355{
2356 restart_notifier_cb(NULL, SUBSYS_BEFORE_SHUTDOWN, NULL);
2357 restart_notifier_cb(NULL, SUBSYS_AFTER_SHUTDOWN, NULL);
2358}
2359EXPORT_SYMBOL(msm_bam_dmux_deinit);
2360
2361/**
2362 * msm_bam_dmux_reinit() - reinitializes bam dmux
2363 */
2364void msm_bam_dmux_reinit(void)
2365{
2366 bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2367 SMSM_A2_POWER_CONTROL,
2368 bam_dmux_smsm_cb, NULL);
2369 bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2370 SMSM_A2_POWER_CONTROL_ACK,
2371 bam_dmux_smsm_ack_cb, NULL);
2372 bam_mux_initialized = 0;
2373 bam_init();
2374}
2375EXPORT_SYMBOL(msm_bam_dmux_reinit);
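
/*
 * Illustrative sketch, not part of the driver: how a test harness might
 * swap in stubbed operations using the three exports above. Per the
 * precondition on msm_bam_dmux_set_bam_ops(), the driver is first driven
 * idle via a simulated SSR; test_ops is hypothetical.
 */
static void __maybe_unused example_enter_test_mode(struct bam_ops_if *test_ops)
{
	msm_bam_dmux_deinit();			/* simulated SSR leaves dmux idle */
	msm_bam_dmux_set_bam_ops(test_ops);	/* NULL restores the defaults */
	msm_bam_dmux_reinit();			/* re-registers SMSM callbacks */
}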
2376
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002377static int bam_dmux_probe(struct platform_device *pdev)
2378{
2379 int rc;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002380 struct resource *r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002381
2382 DBG("%s probe called\n", __func__);
2383 if (bam_mux_initialized)
2384 return 0;
2385
Jeff Hugo3910ee12012-08-21 14:08:20 -06002386 if (pdev->dev.of_node) {
2387 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2388 if (!r) {
2389 pr_err("%s: reg field missing\n", __func__);
2390 return -ENODEV;
2391 }
2392 a2_phys_base = (void *)(r->start);
2393 a2_phys_size = (uint32_t)(resource_size(r));
2394 a2_bam_irq = platform_get_irq(pdev, 0);
2395 if (a2_bam_irq == -ENXIO) {
2396 pr_err("%s: irq field missing\n", __func__);
2397 return -ENODEV;
2398 }
2399 DBG("%s: base:%p size:%x irq:%d\n", __func__,
2400 a2_phys_base,
2401 a2_phys_size,
2402 a2_bam_irq);
2403 } else { /* fallback to default init data */
2404 a2_phys_base = (void *)(A2_PHYS_BASE);
2405 a2_phys_size = A2_PHYS_SIZE;
2406 a2_bam_irq = A2_BAM_IRQ;
2407 }
2408
Stephen Boyd69d35e32012-02-14 15:33:30 -08002409 xo_clk = clk_get(&pdev->dev, "xo");
2410 if (IS_ERR(xo_clk)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302411 BAM_DMUX_LOG("%s: did not get xo clock\n", __func__);
Jeff Hugod0befde2012-08-09 15:32:49 -06002412 xo_clk = NULL;
Stephen Boyd69d35e32012-02-14 15:33:30 -08002413 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002414 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002415 if (IS_ERR(dfab_clk)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302416 BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__);
Jeff Hugod0befde2012-08-09 15:32:49 -06002417 dfab_clk = NULL;
2418 } else {
2419 rc = clk_set_rate(dfab_clk, 64000000);
2420 if (rc)
2421 pr_err("%s: unable to set dfab clock rate\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002422 }
2423
Jeff Hugofff43af92012-03-29 17:54:52 -06002424 /*
2425 * setup the workqueue so that it can be pinned to core 0 and not
2426 * block the watchdog pet function, so that netif_rx() in rmnet
2427 * only uses one queue.
2428 */
2429 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2430 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002431 if (!bam_mux_rx_workqueue)
2432 return -ENOMEM;
2433
2434 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2435 if (!bam_mux_tx_workqueue) {
2436 destroy_workqueue(bam_mux_rx_workqueue);
2437 return -ENOMEM;
2438 }
2439
Jeff Hugo7960abd2011-08-02 15:39:38 -06002440 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002441 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002442 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2443 "bam_dmux_ch_%d", rc);
2444 /* bus 2, ie a2 stream 2 */
2445 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2446 if (!bam_ch[rc].pdev) {
2447 pr_err("%s: platform device alloc failed\n", __func__);
2448 destroy_workqueue(bam_mux_rx_workqueue);
2449 destroy_workqueue(bam_mux_tx_workqueue);
2450 return -ENOMEM;
2451 }
2452 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002453
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002454 init_completion(&ul_wakeup_ack_completion);
2455 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002456 init_completion(&dfab_unvote_completion);
Brent Hronik096f7d32013-06-28 15:43:08 -06002457 init_completion(&shutdown_completion);
2458 complete_all(&shutdown_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002459 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugo988e7ba2012-10-03 15:53:54 -06002460 INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002461 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002462
Brent Hronik89c96ba2013-08-27 14:34:22 -06002463 rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2464 SMSM_A2_POWER_CONTROL,
2465 bam_dmux_smsm_cb, NULL);
Jeff Hugoade1f842011-08-03 15:53:59 -06002466
2467 if (rc) {
2468 destroy_workqueue(bam_mux_rx_workqueue);
2469 destroy_workqueue(bam_mux_tx_workqueue);
2470 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2471 return -ENOMEM;
2472 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473
Brent Hronik89c96ba2013-08-27 14:34:22 -06002474 rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2475 SMSM_A2_POWER_CONTROL_ACK,
2476 bam_dmux_smsm_ack_cb, NULL);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002477
2478 if (rc) {
2479 destroy_workqueue(bam_mux_rx_workqueue);
2480 destroy_workqueue(bam_mux_tx_workqueue);
Brent Hronik89c96ba2013-08-27 14:34:22 -06002481 bam_ops->smsm_state_cb_deregister_ptr(SMSM_MODEM_STATE,
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002482 SMSM_A2_POWER_CONTROL,
2483 bam_dmux_smsm_cb, NULL);
2484 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2485 rc);
2486 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2487 platform_device_put(bam_ch[rc].pdev);
2488 return -ENOMEM;
2489 }
2490
Brent Hronik89c96ba2013-08-27 14:34:22 -06002491 if (bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE) &
2492 SMSM_A2_POWER_CONTROL)
2493 bam_dmux_smsm_cb(NULL, 0,
2494 bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE));
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002495
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002496 return 0;
2497}
2498
Jeff Hugo3910ee12012-08-21 14:08:20 -06002499static struct of_device_id msm_match_table[] = {
2500 {.compatible = "qcom,bam_dmux"},
2501 {},
2502};
2503
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002504static struct platform_driver bam_dmux_driver = {
2505 .probe = bam_dmux_probe,
2506 .driver = {
2507 .name = "BAM_RMNT",
2508 .owner = THIS_MODULE,
Jeff Hugo3910ee12012-08-21 14:08:20 -06002509 .of_match_table = msm_match_table,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002510 },
2511};
2512
2513static int __init bam_dmux_init(void)
2514{
2515#ifdef CONFIG_DEBUG_FS
2516 struct dentry *dent;
2517
2518 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002519 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002520 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002521 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2522 debug_create("stats", 0444, dent, debug_stats);
2523 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002524#endif
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302525
2526 bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
2527 if (!bam_ipc_log_txt) {
2528 pr_err("%s: unable to create IPC Logging Context\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07002529 }
2530
Anurag Singhdcd8b4e2012-07-30 16:46:37 -07002531 rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
2532
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002533 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002534 return platform_driver_register(&bam_dmux_driver);
2535}
2536
Jeff Hugoade1f842011-08-03 15:53:59 -06002537late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002538MODULE_DESCRIPTION("MSM BAM DMUX");
2539MODULE_LICENSE("GPL v2");