/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <mach/msm_ipc_logging.h>
#include <linux/srcu.h>
#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#include "bam_dmux_private.h"

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 2950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 3050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 1;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
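/*
 * All of the parameters above are also writable at runtime through
 * sysfs. An illustrative (unverified) example, assuming the module is
 * named bam_dmux:
 *
 *	echo 1 > /sys/module/bam_dmux/parameters/adaptive_timer_enabled
 */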

static struct bam_ops_if bam_default_ops = {
	/* smsm */
	.smsm_change_state_ptr = &smsm_change_state,
	.smsm_get_state_ptr = &smsm_get_state,
	.smsm_state_cb_register_ptr = &smsm_state_cb_register,
	.smsm_state_cb_deregister_ptr = &smsm_state_cb_deregister,

	/* sps */
	.sps_connect_ptr = &sps_connect,
	.sps_disconnect_ptr = &sps_disconnect,
	.sps_register_bam_device_ptr = &sps_register_bam_device,
	.sps_deregister_bam_device_ptr = &sps_deregister_bam_device,
	.sps_alloc_endpoint_ptr = &sps_alloc_endpoint,
	.sps_free_endpoint_ptr = &sps_free_endpoint,
	.sps_set_config_ptr = &sps_set_config,
	.sps_get_config_ptr = &sps_get_config,
	.sps_device_reset_ptr = &sps_device_reset,
	.sps_register_event_ptr = &sps_register_event,
	.sps_transfer_one_ptr = &sps_transfer_one,
	.sps_get_iovec_ptr = &sps_get_iovec,
	.sps_get_unused_desc_num_ptr = &sps_get_unused_desc_num,

	.dma_to = DMA_TO_DEVICE,
	.dma_from = DMA_FROM_DEVICE,
};
static struct bam_ops_if *bam_ops = &bam_default_ops;
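/*
 * Every smsm/sps call in this driver goes through bam_ops rather than
 * being made directly, so the entire hardware interface can be swapped
 * out as a unit -- presumably for off-target testing. A hypothetical
 * sketch (the stub names are illustrative, not part of this driver):
 *
 *	static struct bam_ops_if stub_ops = {
 *		.sps_transfer_one_ptr = &stub_sps_transfer_one,
 *		// ... remaining pointers filled in likewise ...
 *	};
 *
 *	bam_ops = &stub_ops;		// route calls through the stubs
 *	bam_ops = &bam_default_ops;	// restore the real interface
 */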

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do { \
		if (msm_bam_dmux_debug_enable) \
			pr_debug(x); \
	} while (0)

#define DBG_INC_READ_CNT(x) do { \
		bam_dmux_read_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total read bytes %u\n", \
				 __func__, bam_dmux_read_cnt); \
	} while (0)

#define DBG_INC_WRITE_CNT(x) do { \
		bam_dmux_write_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do { \
		bam_dmux_write_cpy_bytes += (x); \
		bam_dmux_write_cpy_cnt++; \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt, \
				 bam_dmux_write_cpy_bytes); \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
	bam_dmux_tx_sps_failure_cnt++; \
} while (0)

#define DBG_INC_TX_STALL_CNT() do { \
	bam_dmux_tx_stall_cnt++; \
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

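/*
 * Per-channel state. Field roles below are inferred from how the fields
 * are used later in this file:
 * @status:      bitmask of BAM_CH_LOCAL_OPEN/BAM_CH_REMOTE_OPEN/
 *               BAM_CH_IN_RESET
 * @notify:      client callback for BAM_DMUX_RECEIVE, BAM_DMUX_WRITE_DONE
 *               and related events
 * @priv:        opaque client context passed back through @notify
 * @lock:        protects channel status and the tx packet count
 * @pdev:        platform device added/removed as the remote side opens
 *               and closes the channel
 * @name:        platform device name
 * @num_tx_pkts: packets queued but not yet write-done, compared against
 *               the watermarks
 * @use_wm:      set once a client opts in to watermark flow control
 */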
struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_DEFAULT_DESCRIPTORS 32
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define NUM_BUFFERS 32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;
static unsigned long long last_rx_pkt_timestamp;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

static struct srcu_struct bam_dmux_srcu;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
#define SHUTDOWN_TIMEOUT_MS 500
#define UL_WAKEUP_TIMEOUT_MS 2000
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;
static int in_ssr;
static int ssr_skipped_disconnect;
static struct completion shutdown_completion;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

struct kfifo bam_dmux_state_log;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void *bam_ipc_log_txt;

#define BAM_IPC_LOG_PAGES 5

/**
 * Log a state change along with a small message.
 * Complete size of message is limited to @todo.
 * Logging is done using IPC Logging infrastructure.
 *
 * States
 * D: 1 = Power collapse disabled
 * R: 1 = in global reset
 * P: 1 = BAM is powered up
 * A: 1 = BAM initialized and ready for data
 * V: 1 = Uplink vote for power
 * U: 1 = Uplink active
 * W: 1 = Uplink Wait-for-ack
 * A: 1 = Uplink ACK received
 * #: >=1 On-demand uplink vote
 * D: 1 = Disconnect ACK active
 */

#define BAM_DMUX_LOG(fmt, args...) \
do { \
	if (bam_ipc_log_txt) { \
		ipc_log_string(bam_ipc_log_txt, \
			"<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
			a2_pc_disabled ? 'D' : 'd', \
			in_global_reset ? 'R' : 'r', \
			bam_dmux_power_state ? 'P' : 'p', \
			bam_connection_is_active ? 'A' : 'a', \
			bam_dmux_uplink_vote ? 'V' : 'v', \
			bam_is_connected ? 'U' : 'u', \
			wait_for_ack ? 'W' : 'w', \
			ul_wakeup_ack_completion.done ? 'A' : 'a', \
			atomic_read(&ul_ondemand_vote), \
			disconnect_ack ? 'D' : 'd', \
			args); \
	} \
} while (0)

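/*
 * As an illustration (hand-constructed, not captured from a device), a
 * message logged while powered up and connected, with one on-demand
 * uplink vote outstanding, would be prefixed roughly as:
 *
 *	<DMUX> drPA VUwA1D <message text>
 */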
#define DMUX_LOG_KERR(fmt, args...) \
do { \
	BAM_DMUX_LOG(fmt, args); \
	pr_err(fmt, args); \
} while (0)

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

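/*
 * Note on the split above: do_div() divides its 64-bit argument in
 * place, leaving the quotient (whole seconds) in t_now and returning
 * the remainder (nanoseconds). For example, a raw sched_clock() value
 * of 5000000123 ns yields ts_sec = 5 and ts_nsec = 123.
 */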
static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			BAM_DMUX_LOG("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

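/*
 * queue_rx() - replenish the rx descriptor pool
 *
 * Tops the pool back up to NUM_BUFFERS entries, allocating a
 * rx_pkt_info plus skb for each and queueing them to the rx pipe.
 * Allocations use GFP_NOWAIT because this also runs from the rx path;
 * if anything fails (or SSR is in progress) the refill is abandoned,
 * and when the pool has drained completely a retry is scheduled 100 ms
 * out through queue_rx_work.
 */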
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (bam_connection_is_active && rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
						GFP_NOWAIT | __GFP_NOWARN);
		if (!info) {
			DMUX_LOG_KERR(
			"%s: unable to alloc rx_pkt_info, will retry later\n",
								__func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
						GFP_NOWAIT | __GFP_NOWARN);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
								__func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							bam_ops->dma_from);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = bam_ops->sps_transfer_one_ptr(bam_rx_pipe,
				info->dma_address, BUFFER_SIZE, info, 0);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						bam_ops->dma_from);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);
	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
	}
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
}

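/*
 * bam_mux_process_data() - deliver a demuxed data packet to its client
 *
 * Strips the mux header by advancing skb->data past it and trimming the
 * length to pkt_len (which excludes the tail padding), then hands the
 * skb to the channel's notify callback as a BAM_DMUX_RECEIVE event.
 * Packets for channels with no listener are dropped. Either way the rx
 * pool is topped back up.
 */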
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

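/*
 * handle_bam_mux_cmd() - per-buffer rx completion worker
 *
 * Unmaps the DMA buffer, validates the mux header (magic number and
 * logical channel id), then dispatches on the command: DATA goes to
 * bam_mux_process_data(), OPEN and OPEN_NO_A2_PC go to
 * handle_bam_mux_cmd_open() (the latter also permanently disabling A2
 * power collapse), and CLOSE tears down and re-allocates the channel's
 * platform device. Malformed headers are logged and dropped.
 */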
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
			bam_ops->dma_from);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
				" reserved %d cmd %d"
				" pad %d ch %d len %d\n", __func__,
				rx_hdr->magic_num, rx_hdr->reserved,
				rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
				" reserved %d cmd %d"
				" pad %d ch %d len %d\n", __func__,
				rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
				rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
				" reserved %d cmd %d pad %d ch %d len %d\n",
				__func__, rx_hdr->magic_num, rx_hdr->reserved,
				rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
				rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					bam_ops->dma_to);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
					&errant_pkt->list_node,
					errant_pkt->ts_sec,
					errant_pkt->ts_nsec);
		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

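/*
 * msm_bam_dmux_write() - queue an skb for transmission on a channel
 *
 * A bam_mux_hdr is pushed onto the front of the skb and the payload is
 * padded out to a 4-byte boundary (if there is no tailroom for the
 * padding, the data is first copied into an expanded skb), so callers
 * should leave headroom for the header. Returns -EAGAIN when watermark
 * flow control is enabled and HIGH_WATERMARK packets are already in
 * flight. On success the skb is handed back to the client later via a
 * BAM_DMUX_WRITE_DONE event (or freed if no callback is registered).
 */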
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;
	int rcu_id;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	rcu_id = srcu_read_lock(&bam_dmux_srcu);
	if (in_global_reset) {
		BAM_DMUX_LOG("%s: In SSR... ch_id[%d]\n", __func__, id);
		srcu_read_unlock(&bam_dmux_srcu, rcu_id);
		return -EFAULT;
	}

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		srcu_read_unlock(&bam_dmux_srcu, rcu_id);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		srcu_read_unlock(&bam_dmux_srcu, rcu_id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1)) {
			srcu_read_unlock(&bam_dmux_srcu, rcu_id);
			return -EFAULT;
		}
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy are probably more
		   efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, bam_ops->dma_to);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	srcu_read_unlock(&bam_dmux_srcu, rcu_id);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	skb_pull(skb, sizeof(struct bam_mux_hdr));
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	srcu_read_unlock(&bam_dmux_srcu, rcu_id);
	return -ENOMEM;
}

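/*
 * Typical client usage of the API above -- an illustrative sketch only:
 * my_notify(), my_ctx, ch_id and handle_rx_skb() are hypothetical, and
 * error handling is elided.
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_RECEIVE)
 *			handle_rx_skb((struct sk_buff *)data);
 *		else if (event == BAM_DMUX_WRITE_DONE)
 *			dev_kfree_skb_any((struct sk_buff *)data);
 *	}
 *
 *	rc = msm_bam_dmux_open(ch_id, my_ctx, my_notify);
 *	...
 *	rc = msm_bam_dmux_write(ch_id, skb);
 */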
878int msm_bam_dmux_open(uint32_t id, void *priv,
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600879 void (*notify)(void *, int, unsigned long))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880{
881 struct bam_mux_hdr *hdr;
882 unsigned long flags;
883 int rc = 0;
884
885 DBG("%s: opening ch %d\n", __func__, id);
Eric Holmberg5d775432011-11-09 10:23:35 -0700886 if (!bam_mux_initialized) {
887 DBG("%s: not inititialized\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700888 return -ENODEV;
Eric Holmberg5d775432011-11-09 10:23:35 -0700889 }
890 if (id >= BAM_DMUX_NUM_CHANNELS) {
891 pr_err("%s: invalid channel id %d\n", __func__, id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700893 }
894 if (notify == NULL) {
895 pr_err("%s: notify function is NULL\n", __func__);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600896 return -EINVAL;
Eric Holmberg5d775432011-11-09 10:23:35 -0700897 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898
899 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
900 if (hdr == NULL) {
901 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
902 return -ENOMEM;
903 }
904 spin_lock_irqsave(&bam_ch[id].lock, flags);
905 if (bam_ch_is_open(id)) {
906 DBG("%s: Already opened %d\n", __func__, id);
907 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
908 kfree(hdr);
909 goto open_done;
910 }
911 if (!bam_ch_is_remote_open(id)) {
912 DBG("%s: Remote not open; ch: %d\n", __func__, id);
913 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
914 kfree(hdr);
Eric Holmberg5d775432011-11-09 10:23:35 -0700915 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700916 }
917
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600918 bam_ch[id].notify = notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700919 bam_ch[id].priv = priv;
920 bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700921 bam_ch[id].num_tx_pkts = 0;
922 bam_ch[id].use_wm = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700923 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
924
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600925 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600926 if (!bam_is_connected) {
927 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600928 ul_wakeup();
Brent Hronik96630422013-05-01 16:38:43 -0600929 if (unlikely(in_global_reset == 1)) {
930 kfree(hdr);
Jeff Hugo4838f412012-01-20 11:19:37 -0700931 return -EFAULT;
Brent Hronik96630422013-05-01 16:38:43 -0600932 }
Jeff Hugo061ce672011-10-21 17:15:32 -0600933 read_lock(&ul_wakeup_lock);
Jeff Hugod98b1082011-10-24 10:30:23 -0600934 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
Jeff Hugo061ce672011-10-21 17:15:32 -0600935 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
938 hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
939 hdr->reserved = 0;
940 hdr->ch_id = id;
941 hdr->pkt_len = 0;
942 hdr->pad_len = 0;
943
944 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600945 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946
947open_done:
948 DBG("%s: opened ch %d\n", __func__, id);
949 return rc;
950}
951
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

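/*
 * Watermark queries for client-side flow control. Calling either
 * function opts the channel in to watermark enforcement (use_wm = 1),
 * after which msm_bam_dmux_write() returns -EAGAIN once HIGH_WATERMARK
 * (4) packets are in flight; a client can then poll
 * msm_bam_dmux_is_ch_low() to learn when the queue has drained to
 * LOW_WATERMARK (2) or fewer and transmission can resume.
 */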
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

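/*
 * rx_switch_to_interrupt_mode() - leave polling and rearm the rx EOT
 * interrupt
 *
 * Re-registers for SPS_O_EOT, clears polling_mode, completes
 * shutdown_completion and releases the wakelock, then drains any
 * descriptors that completed in the window before interrupts came back
 * on. If reconfiguring the pipe fails, the driver stays in polling mode
 * and requeues rx_timer_work to retry later.
 */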
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = bam_ops->sps_set_config_ptr(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	complete_all(&shutdown_completion);
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

/**
 * store_rx_timestamp() - store the current raw time as a timestamp for
 * when the last rx packet was processed
 */
static void store_rx_timestamp(void)
{
	last_rx_pkt_timestamp = sched_clock();
}

/**
 * log_rx_timestamp() - Log the stored rx pkt timestamp in a human readable
 * format
 */
static void log_rx_timestamp(void)
{
	unsigned long long t = last_rx_pkt_timestamp;
	unsigned long nanosec_rem;

	nanosec_rem = do_div(t, 1000000000U);
	BAM_DMUX_LOG("Last rx pkt processed at [%6u.%09lu]\n", (unsigned)t,
			nanosec_rem);
}

static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	u32 buffs_unused, buffs_used;

	BAM_DMUX_LOG("%s: polling start\n", __func__);
	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset) {
				BAM_DMUX_LOG(
					"%s: polling exit, global reset detected\n",
					__func__);
				return;
			}

			ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
			if (ret) {
				DMUX_LOG_KERR("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			store_rx_timestamp();
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles >= POLLING_INACTIVITY) {
			BAM_DMUX_LOG("%s: polling exit, no data\n", __func__);
			rx_switch_to_interrupt_mode();
			break;
		}

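		/*
		 * Adaptive poll pacing: after each sleep the interval is
		 * rescaled by ring occupancy,
		 *
		 *	interval' = (2 * NUM_BUFFERS * interval) /
		 *			(3 * buffs_used)
		 *
		 * so using more than ~2/3 of the ring shortens the sleep
		 * and lighter traffic lengthens it. For example, with
		 * NUM_BUFFERS = 32 and a 3000 us interval, 24 buffers used
		 * gives (64 * 3000) / 72 = 2666 us while 16 used gives
		 * (64 * 3000) / 48 = 4000 us. A ring with no unused
		 * descriptors drops straight to MIN_POLLING_SLEEP, an idle
		 * ring jumps to MAX_POLLING_SLEEP, and the result is
		 * clamped to that same range.
		 */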
		if (bam_adaptive_timer_enabled) {
			usleep_range(rx_timer_interval, rx_timer_interval + 50);

			ret = bam_ops->sps_get_unused_desc_num_ptr(bam_rx_pipe,
						&buffs_unused);

			if (ret) {
				DMUX_LOG_KERR(
					"%s: error getting num buffers unused after sleep\n",
					__func__);

				break;
			}

			buffs_used = NUM_BUFFERS - buffs_unused;

			if (buffs_unused == 0) {
				rx_timer_interval = MIN_POLLING_SLEEP;
			} else {
				if (buffs_used > 0) {
					rx_timer_interval =
						(2 * NUM_BUFFERS *
							rx_timer_interval) /
						(3 * buffs_used);
				} else {
					rx_timer_interval =
						MAX_POLLING_SLEEP;
				}
			}

			if (rx_timer_interval > MAX_POLLING_SLEEP)
				rx_timer_interval = MAX_POLLING_SLEEP;
			else if (rx_timer_interval < MIN_POLLING_SLEEP)
				rx_timer_interval = MIN_POLLING_SLEEP;
		} else {
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		}
	}
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001258static void bam_mux_tx_notify(struct sps_event_notify *notify)
1259{
1260 struct tx_pkt_info *pkt;
1261
1262 DBG("%s: event %d notified\n", __func__, notify->event_id);
1263
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001264 if (in_global_reset)
1265 return;
1266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001267 switch (notify->event_id) {
1268 case SPS_EVENT_EOT:
1269 pkt = notify->data.transfer.user;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001270 if (!pkt->is_cmd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001271 dma_unmap_single(NULL, pkt->dma_address,
1272 pkt->skb->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06001273 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001274 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275 dma_unmap_single(NULL, pkt->dma_address,
1276 pkt->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06001277 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001278 queue_work(bam_mux_tx_workqueue, &pkt->work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 break;
1280 default:
1281		pr_err("%s: received unexpected event id %d\n", __func__,
1282 notify->event_id);
1283 }
1284}
1285
Jeff Hugo33dbc002011-08-25 15:52:53 -06001286static void bam_mux_rx_notify(struct sps_event_notify *notify)
1287{
Jeff Hugo949080a2011-08-30 11:58:56 -06001288 int ret;
1289 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -06001290
1291 DBG("%s: event %d notified\n", __func__, notify->event_id);
1292
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001293 if (in_global_reset)
1294 return;
1295
Jeff Hugo33dbc002011-08-25 15:52:53 -06001296 switch (notify->event_id) {
1297 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -06001298 /* attempt to disable interrupts in this pipe */
1299 if (!polling_mode) {
Brent Hronik89c96ba2013-08-27 14:34:22 -06001300 ret = bam_ops->sps_get_config_ptr(bam_rx_pipe,
1301 &cur_rx_conn);
Jeff Hugo949080a2011-08-30 11:58:56 -06001302 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001303				pr_err("%s: sps_get_config() failed %d, interrupts not disabled\n",
1304						__func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001305 break;
1306 }
Jeff Hugoa9d32ba2011-11-21 14:59:48 -07001307 cur_rx_conn.options = SPS_O_AUTO_ENABLE |
Jeff Hugo949080a2011-08-30 11:58:56 -06001308 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06001309 ret = bam_ops->sps_set_config_ptr(bam_rx_pipe,
1310 &cur_rx_conn);
Jeff Hugo949080a2011-08-30 11:58:56 -06001311 if (ret) {
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001312				pr_err("%s: sps_set_config() failed %d, interrupts not disabled\n",
1313						__func__, ret);
Jeff Hugo949080a2011-08-30 11:58:56 -06001314 break;
1315 }
Brent Hronik096f7d32013-06-28 15:43:08 -06001316 INIT_COMPLETION(shutdown_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07001317 grab_wakelock();
Jeff Hugo949080a2011-08-30 11:58:56 -06001318 polling_mode = 1;
Jeff Hugofff43af92012-03-29 17:54:52 -06001319 /*
1320 * run on core 0 so that netif_rx() in rmnet uses only
1321 * one queue
1322 */
1323 queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
Jeff Hugo949080a2011-08-30 11:58:56 -06001324 }
Jeff Hugo33dbc002011-08-25 15:52:53 -06001325 break;
1326 default:
1327		pr_err("%s: received unexpected event id %d\n", __func__,
1328 notify->event_id);
1329 }
1330}
1331
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001332#ifdef CONFIG_DEBUG_FS
1333
1334static int debug_tbl(char *buf, int max)
1335{
1336 int i = 0;
1337 int j;
1338
1339 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1340 i += scnprintf(buf + i, max - i,
1341 "ch%02d local open=%s remote open=%s\n",
1342 j, bam_ch_is_local_open(j) ? "Y" : "N",
1343 bam_ch_is_remote_open(j) ? "Y" : "N");
1344 }
1345
1346 return i;
1347}
1348
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001349static int debug_ul_pkt_cnt(char *buf, int max)
1350{
1351 struct list_head *p;
1352 unsigned long flags;
1353 int n = 0;
1354
1355 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1356 __list_for_each(p, &bam_tx_pool) {
1357 ++n;
1358 }
1359 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1360
1361 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1362}
1363
1364static int debug_stats(char *buf, int max)
1365{
1366 int i = 0;
1367
1368 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001369 "skb read cnt: %u\n"
1370 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001371 "skb copy cnt: %u\n"
1372 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001373 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001374 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001375 "rx queue len: %d\n"
1376 "a2 ack out cnt: %d\n"
1377 "a2 ack in cnt: %d\n"
1378 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001379 bam_dmux_read_cnt,
1380 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001381 bam_dmux_write_cpy_cnt,
1382 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001383 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001384 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001385 bam_rx_pool_len,
1386 atomic_read(&bam_dmux_ack_out_cnt),
1387 atomic_read(&bam_dmux_ack_in_cnt),
1388 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001389 );
1390
1391 return i;
1392}
1393
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001394#define DEBUG_BUFMAX 4096
1395static char debug_buffer[DEBUG_BUFMAX];
1396
1397static ssize_t debug_read(struct file *file, char __user *buf,
1398 size_t count, loff_t *ppos)
1399{
1400 int (*fill)(char *buf, int max) = file->private_data;
1401 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1402 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1403}
1404
1405static int debug_open(struct inode *inode, struct file *file)
1406{
1407 file->private_data = inode->i_private;
1408 return 0;
1409}
1410
1411
1412static const struct file_operations debug_ops = {
1413 .read = debug_read,
1414 .open = debug_open,
1415};
1416
1417static void debug_create(const char *name, mode_t mode,
1418 struct dentry *dent,
1419 int (*fill)(char *buf, int max))
1420{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001421 struct dentry *file;
1422
1423 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1424 if (IS_ERR(file))
1425 pr_err("%s: debugfs create failed %d\n", __func__,
1426 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001427}
1428
1429#endif
1430
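/*
 * Broadcast a power event to every open logical channel and to all
 * listeners registered via msm_bam_dmux_reg_notify().
 */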
Jeff Hugod98b1082011-10-24 10:30:23 -06001431static void notify_all(int event, unsigned long data)
1432{
1433 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001434 struct list_head *temp;
1435 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001436
Jeff Hugoac8152a2013-04-19 11:05:19 -06001437 BAM_DMUX_LOG("%s: event=%d, data=%lu\n", __func__, event, data);
1438
Jeff Hugod98b1082011-10-24 10:30:23 -06001439 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Jeff Hugoac8152a2013-04-19 11:05:19 -06001440 if (bam_ch_is_open(i))
Jeff Hugod98b1082011-10-24 10:30:23 -06001441 bam_ch[i].notify(bam_ch[i].priv, event, data);
1442 }
Jeff Hugocb798022012-04-09 14:55:40 -06001443
1444 __list_for_each(temp, &bam_other_notify_funcs) {
1445 func = container_of(temp, struct outside_notify_func,
1446 list_node);
1447 func->notify(func->priv, event, data);
1448 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001449}
1450
1451static void kickoff_ul_wakeup_func(struct work_struct *work)
1452{
1453 read_lock(&ul_wakeup_lock);
1454 if (!bam_is_connected) {
1455 read_unlock(&ul_wakeup_lock);
1456 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001457 if (unlikely(in_global_reset == 1))
1458 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001459 read_lock(&ul_wakeup_lock);
1460 ul_packet_written = 1;
1461 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1462 }
1463 read_unlock(&ul_wakeup_lock);
1464}
1465
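/*
 * Notes that UL data is pending and kicks off a wakeup if the BAM is not
 * already connected.
 *
 * @returns 1 if UL is currently connected, 0 if a wakeup was scheduled
 */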
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001466int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001467{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001468 int is_connected;
1469
1470 read_lock(&ul_wakeup_lock);
1471 ul_packet_written = 1;
1472 is_connected = bam_is_connected;
1473 if (!is_connected)
1474 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1475 read_unlock(&ul_wakeup_lock);
1476
1477 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001478}
1479
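/*
 * Applies or drops the apps processor's A2 power vote by setting or
 * clearing SMSM_A2_POWER_CONTROL; duplicate votes are logged as warnings.
 */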
Eric Holmberg878923a2012-01-10 14:28:19 -07001480static void power_vote(int vote)
1481{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301482 BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07001483 bam_dmux_uplink_vote, vote);
1484
1485 if (bam_dmux_uplink_vote == vote)
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301486 BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07001487
1488 bam_dmux_uplink_vote = vote;
1489 if (vote)
Brent Hronik89c96ba2013-08-27 14:34:22 -06001490 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
1491 0, SMSM_A2_POWER_CONTROL);
Eric Holmberg878923a2012-01-10 14:28:19 -07001492 else
Brent Hronik89c96ba2013-08-27 14:34:22 -06001493 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
1494 SMSM_A2_POWER_CONTROL, 0);
Eric Holmberg878923a2012-01-10 14:28:19 -07001495}
1496
Eric Holmberg454d9da2012-01-12 09:37:14 -07001497/*
1498 * @note: Must be called with ul_wakeup_lock locked.
1499 */
1500static inline void ul_powerdown(void)
1501{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301502 BAM_DMUX_LOG("%s: powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001503 verify_tx_queue_is_empty(__func__);
1504
1505 if (a2_pc_disabled) {
1506 wait_for_dfab = 1;
1507 INIT_COMPLETION(dfab_unvote_completion);
1508 release_wakelock();
1509 } else {
1510 wait_for_ack = 1;
1511 INIT_COMPLETION(ul_wakeup_ack_completion);
1512 power_vote(0);
1513 }
1514 bam_is_connected = 0;
1515 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1516}
1517
1518static inline void ul_powerdown_finish(void)
1519{
1520 if (a2_pc_disabled && wait_for_dfab) {
1521 unvote_dfab();
1522 complete_all(&dfab_unvote_completion);
1523 wait_for_dfab = 0;
1524 }
1525}
1526
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001527/*
1528 * Votes for UL power and returns current power state.
1529 *
1530 * @returns true if currently connected
1531 */
1532int msm_bam_dmux_ul_power_vote(void)
1533{
1534 int is_connected;
1535
1536 read_lock(&ul_wakeup_lock);
1537 atomic_inc(&ul_ondemand_vote);
1538 is_connected = bam_is_connected;
1539 if (!is_connected)
1540 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1541 read_unlock(&ul_wakeup_lock);
1542
1543 return is_connected;
1544}
1545
1546/*
1547 * Unvotes for UL power.
1548 *
1549 * @returns true if vote count is 0 (UL shutdown possible)
1550 */
1551int msm_bam_dmux_ul_power_unvote(void)
1552{
1553 int vote;
1554
1555 read_lock(&ul_wakeup_lock);
1556 vote = atomic_dec_return(&ul_ondemand_vote);
1557	if (unlikely(vote < 0))
1558 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1559 read_unlock(&ul_wakeup_lock);
1560
1561 return vote == 0;
1562}
1563
Jeff Hugocb798022012-04-09 14:55:40 -06001564int msm_bam_dmux_reg_notify(void *priv,
1565 void (*notify)(void *priv, int event_type,
1566 unsigned long data))
1567{
1568 struct outside_notify_func *func;
1569
1570 if (!notify)
1571 return -EINVAL;
1572
1573 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1574 if (!func)
1575 return -ENOMEM;
1576
1577 func->notify = notify;
1578 func->priv = priv;
1579 list_add(&func->list_node, &bam_other_notify_funcs);
1580
1581 return 0;
1582}
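
/*
 * Illustrative registration (sketch only; my_ctx, my_power_cb, and the
 * pause/resume helpers are hypothetical caller-side names, not part of
 * this driver):
 *
 *	static void my_power_cb(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			resume_ul_tx(priv);
 *		else if (event == BAM_DMUX_UL_DISCONNECTED)
 *			pause_ul_tx(priv);
 *	}
 *
 *	rc = msm_bam_dmux_reg_notify(my_ctx, my_power_cb);
 */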
1583
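/*
 * UL inactivity timer: reschedules itself while UL traffic or on-demand
 * votes are outstanding, and powers the link down after a full timeout
 * period with no activity.
 */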
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001584static void ul_timeout(struct work_struct *work)
1585{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001586 unsigned long flags;
1587 int ret;
1588
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001589 if (in_global_reset)
1590 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001591 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1592 if (!ret) { /* failed to grab lock, reschedule and bail */
1593 schedule_delayed_work(&ul_timeout_work,
1594 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1595 return;
1596 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001597 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001598 if (!ul_packet_written) {
1599 spin_lock(&bam_tx_pool_spinlock);
1600 if (!list_empty(&bam_tx_pool)) {
1601 struct tx_pkt_info *info;
1602
1603 info = list_first_entry(&bam_tx_pool,
1604 struct tx_pkt_info, list_node);
1605 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1606 __func__, info->ts_sec, info->ts_nsec);
1607 DBG_INC_TX_STALL_CNT();
1608 ul_packet_written = 1;
1609 }
1610 spin_unlock(&bam_tx_pool_spinlock);
1611 }
1612
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001613 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301614 BAM_DMUX_LOG("%s: pkt written %d\n",
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001615 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001616 ul_packet_written = 0;
1617 schedule_delayed_work(&ul_timeout_work,
1618 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001619 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001620 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001621 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001622 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001623 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001624 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001625}
Jeff Hugo4838f412012-01-20 11:19:37 -07001626
1627static int ssrestart_check(void)
1628{
Jeff Hugob8156d72013-06-04 12:51:10 -06001629 int ret = 0;
1630
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001631 if (in_global_reset) {
1632 DMUX_LOG_KERR("%s: modem timeout: already in SSR\n",
1633 __func__);
1634 return 1;
1635 }
1636
Jeff Hugob8156d72013-06-04 12:51:10 -06001637 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled for SSR\n",
1638 __func__);
Eric Holmberg90285e22012-02-22 12:33:05 -07001639 in_global_reset = 1;
Jeff Hugob8156d72013-06-04 12:51:10 -06001640 ret = subsystem_restart("modem");
1641 if (ret == -ENODEV)
1642 panic("modem subsystem restart failed\n");
Eric Holmberg90285e22012-02-22 12:33:05 -07001643 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001644}
1645
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001646static void ul_wakeup(void)
1647{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001648 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001649 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001650
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001651 mutex_lock(&wakeup_lock);
1652 if (bam_is_connected) { /* bam got connected before lock grabbed */
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301653 BAM_DMUX_LOG("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001654 mutex_unlock(&wakeup_lock);
1655 return;
1656 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001657
Jeff Hugoc2696142012-05-03 11:42:13 -06001658 /*
Jeff Hugof5001732012-08-27 13:19:09 -06001659 * if this gets hit, that means restart_notifier_cb() has started
1660 * but probably not finished, thus we know SSR has happened, but
1661 * haven't been able to send that info to our clients yet.
1662 * in that case, abort the ul_wakeup() so that we don't undo any
1663 * work restart_notifier_cb() has done. The clients will be notified
1664	 * shortly. No cleanup (such as rescheduling the wakeup) is necessary,
1665	 * as our and our clients' SSR handling will cover it
1666 */
1667 if (unlikely(in_global_reset == 1)) {
1668 mutex_unlock(&wakeup_lock);
1669 return;
1670 }
1671
1672 /*
Jeff Hugoc2696142012-05-03 11:42:13 -06001673 * if someone is voting for UL before bam is inited (modem up first
1674 * time), set flag for init to kickoff ul wakeup once bam is inited
1675 */
1676 mutex_lock(&delayed_ul_vote_lock);
1677 if (unlikely(!bam_mux_initialized)) {
1678 need_delayed_ul_vote = 1;
1679 mutex_unlock(&delayed_ul_vote_lock);
1680 mutex_unlock(&wakeup_lock);
1681 return;
1682 }
1683 mutex_unlock(&delayed_ul_vote_lock);
1684
Eric Holmberg006057d2012-01-11 10:10:42 -07001685 if (a2_pc_disabled) {
1686 /*
1687 * don't grab the wakelock the first time because it is
1688 * already grabbed when a2 powers on
1689 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001690 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001691 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001692 do_vote_dfab = 1; /* vote must occur after wait */
1693 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001694 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001695 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001696 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001697 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001698 &dfab_unvote_completion, HZ);
1699 BUG_ON(ret == 0);
1700 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001701 if (likely(do_vote_dfab))
1702 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001703 schedule_delayed_work(&ul_timeout_work,
1704 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1705 bam_is_connected = 1;
1706 mutex_unlock(&wakeup_lock);
1707 return;
1708 }
1709
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001710 /*
1711 * must wait for the previous power down request to have been acked
1712 * chances are it already came in and this will just fall through
1713 * instead of waiting
1714 */
1715 if (wait_for_ack) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301716 BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001717 ret = wait_for_completion_timeout(
Jeff Hugo1f317392013-07-24 16:28:52 -06001718 &ul_wakeup_ack_completion,
1719 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Eric Holmberg006057d2012-01-11 10:10:42 -07001720 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001721 if (unlikely(ret == 0) && ssrestart_check()) {
1722 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301723 BAM_DMUX_LOG("%s timeout previous ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001724 return;
1725 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001726 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001727 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001728 power_vote(1);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301729 BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
Jeff Hugo1f317392013-07-24 16:28:52 -06001730 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion,
1731 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Jeff Hugo4838f412012-01-20 11:19:37 -07001732 if (unlikely(ret == 0) && ssrestart_check()) {
1733 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301734 BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001735 return;
1736 }
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301737 BAM_DMUX_LOG("%s waiting completion\n", __func__);
Jeff Hugo1f317392013-07-24 16:28:52 -06001738 ret = wait_for_completion_timeout(&bam_connection_completion,
1739 msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS));
Jeff Hugo4838f412012-01-20 11:19:37 -07001740 if (unlikely(ret == 0) && ssrestart_check()) {
1741 mutex_unlock(&wakeup_lock);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301742 BAM_DMUX_LOG("%s timeout power on\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001743 return;
1744 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001745
1746 bam_is_connected = 1;
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301747 BAM_DMUX_LOG("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001748 schedule_delayed_work(&ul_timeout_work,
1749 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1750 mutex_unlock(&wakeup_lock);
1751}
1752
1753static void reconnect_to_bam(void)
1754{
1755 int i;
1756
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001757 in_global_reset = 0;
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001758 in_ssr = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001759 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001760 if (!power_management_only_mode) {
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001761 if (ssr_skipped_disconnect) {
1762 /* delayed to here to prevent bus stall */
Brent Hronik89c96ba2013-08-27 14:34:22 -06001763 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
1764 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001765 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1766 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1767 }
1768 ssr_skipped_disconnect = 0;
Brent Hronik89c96ba2013-08-27 14:34:22 -06001769 i = bam_ops->sps_device_reset_ptr(a2_device_handle);
Jeff Hugo18792a32012-06-20 15:25:55 -06001770 if (i)
1771 pr_err("%s: device reset failed rc = %d\n", __func__,
1772 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001773 i = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
Jeff Hugo18792a32012-06-20 15:25:55 -06001774 if (i)
1775 pr_err("%s: tx connection failed rc = %d\n", __func__,
1776 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001777 i = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
Jeff Hugo18792a32012-06-20 15:25:55 -06001778 if (i)
1779 pr_err("%s: rx connection failed rc = %d\n", __func__,
1780 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001781 i = bam_ops->sps_register_event_ptr(bam_tx_pipe,
1782 &tx_register_event);
Jeff Hugo18792a32012-06-20 15:25:55 -06001783 if (i)
1784 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1785 i);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001786 i = bam_ops->sps_register_event_ptr(bam_rx_pipe,
1787 &rx_register_event);
Jeff Hugo18792a32012-06-20 15:25:55 -06001788 if (i)
1789 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1790 i);
1791 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001792
1793 bam_connection_is_active = 1;
1794
1795 if (polling_mode)
1796 rx_switch_to_interrupt_mode();
1797
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001798 toggle_apps_ack();
1799 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001800 if (!power_management_only_mode)
1801 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001802}
1803
1804static void disconnect_to_bam(void)
1805{
1806 struct list_head *node;
1807 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001808 unsigned long flags;
Brent Hronik096f7d32013-06-28 15:43:08 -06001809 unsigned long time_remaining;
1810
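	/*
	 * Outside of SSR, wait for the rx side to quiesce: shutdown_completion
	 * is re-armed whenever rx polling starts, so this blocks until polling
	 * has wound down before the pipes are torn down.
	 */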
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001811 if (!in_global_reset) {
1812 time_remaining = wait_for_completion_timeout(
1813 &shutdown_completion,
1814 msecs_to_jiffies(SHUTDOWN_TIMEOUT_MS));
1815 if (time_remaining == 0) {
1816 DMUX_LOG_KERR("%s: shutdown completion timed out\n",
1817 __func__);
Jeff Hugo7c185602013-09-11 17:39:54 -06001818 log_rx_timestamp();
Eric Holmberg7614a7f2013-07-29 15:47:12 -06001819 ssrestart_check();
1820 }
Brent Hronik096f7d32013-06-28 15:43:08 -06001821 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001822
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001823 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001824
1825 /* handle disconnect during active UL */
1826 write_lock_irqsave(&ul_wakeup_lock, flags);
1827 if (bam_is_connected) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301828 BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001829 ul_powerdown();
1830 }
1831 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1832 ul_powerdown_finish();
1833
1834 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001835 INIT_COMPLETION(bam_connection_completion);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001836
1837 /* in_ssr documentation/assumptions found in restart_notifier_cb */
Jeff Hugo18792a32012-06-20 15:25:55 -06001838 if (!power_management_only_mode) {
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001839 if (likely(!in_ssr)) {
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001840 BAM_DMUX_LOG("%s: disconnect tx\n", __func__);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001841 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001842 BAM_DMUX_LOG("%s: disconnect rx\n", __func__);
Brent Hronik89c96ba2013-08-27 14:34:22 -06001843 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001844 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1845 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
Jeff Hugof7ae7a62013-04-19 11:18:32 -06001846 BAM_DMUX_LOG("%s: device reset\n", __func__);
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001847			bam_ops->sps_device_reset_ptr(a2_device_handle);
1848 } else {
1849 ssr_skipped_disconnect = 1;
1850 }
Jeff Hugo18792a32012-06-20 15:25:55 -06001851 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001852 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001853
1854 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001855 while (!list_empty(&bam_rx_pool)) {
1856 node = bam_rx_pool.next;
1857 list_del(node);
1858 info = container_of(node, struct rx_pkt_info, list_node);
1859 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
Brent Hronik89c96ba2013-08-27 14:34:22 -06001860 bam_ops->dma_from);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001861 dev_kfree_skb_any(info->skb);
1862 kfree(info);
1863 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001864 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001865 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001866
Jeff Hugo0b13a352012-03-17 23:18:30 -06001867 if (disconnect_ack)
1868 toggle_apps_ack();
1869
Eric Holmberg878923a2012-01-10 14:28:19 -07001870 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001871}
1872
1873static void vote_dfab(void)
1874{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001875 int rc;
1876
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301877 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001878 mutex_lock(&dfab_status_lock);
1879 if (dfab_is_on) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301880 BAM_DMUX_LOG("%s: dfab is already on\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001881 mutex_unlock(&dfab_status_lock);
1882 return;
1883 }
Jeff Hugod0befde2012-08-09 15:32:49 -06001884 if (dfab_clk) {
1885 rc = clk_prepare_enable(dfab_clk);
1886 if (rc)
1887 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n",
1888 rc);
1889 }
1890 if (xo_clk) {
1891 rc = clk_prepare_enable(xo_clk);
1892 if (rc)
1893 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n",
1894 rc);
1895 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001896 dfab_is_on = 1;
1897 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001898}
1899
1900static void unvote_dfab(void)
1901{
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301902 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001903 mutex_lock(&dfab_status_lock);
1904 if (!dfab_is_on) {
1905 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1906 dump_stack();
1907 mutex_unlock(&dfab_status_lock);
1908 return;
1909 }
Jeff Hugod0befde2012-08-09 15:32:49 -06001910 if (dfab_clk)
1911 clk_disable_unprepare(dfab_clk);
1912 if (xo_clk)
1913 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001914 dfab_is_on = 0;
1915 mutex_unlock(&dfab_status_lock);
1916}
1917
1918/* reference counting wrapper around wakelock */
1919static void grab_wakelock(void)
1920{
1921 unsigned long flags;
1922
1923 spin_lock_irqsave(&wakelock_reference_lock, flags);
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301924 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001925 wakelock_reference_count);
1926 if (wakelock_reference_count == 0)
1927 wake_lock(&bam_wakelock);
1928 ++wakelock_reference_count;
1929 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1930}
1931
1932static void release_wakelock(void)
1933{
1934 unsigned long flags;
1935
1936 spin_lock_irqsave(&wakelock_reference_lock, flags);
1937 if (wakelock_reference_count == 0) {
1938 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1939 dump_stack();
1940 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1941 return;
1942 }
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05301943 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001944 wakelock_reference_count);
1945 --wakelock_reference_count;
1946 if (wakelock_reference_count == 0)
1947 wake_unlock(&bam_wakelock);
1948 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001949}
1950
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001951static int restart_notifier_cb(struct notifier_block *this,
1952 unsigned long code,
1953 void *data)
1954{
1955 int i;
1956 struct list_head *node;
1957 struct tx_pkt_info *info;
1958 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001959 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001960
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001961 /*
1962 * Bam_dmux counts on the fact that the BEFORE_SHUTDOWN level of
1963 * notifications is guaranteed to execute before the AFTER_SHUTDOWN
1964 * level of notifications, and that BEFORE_SHUTDOWN always occurs in
1965 * all SSR events, no matter what triggered the SSR. Also, bam_dmux
1966 * assumes that SMD does its SSR processing in the AFTER_SHUTDOWN level,
1967 * thus bam_dmux is guaranteed to detect SSR before SMD, since the
1968 * callbacks for all the drivers within the AFTER_SHUTDOWN level could
1969 * occur in any order. Bam_dmux uses this knowledge to skip accessing
1970 * the bam hardware when disconnect_to_bam() is triggered by SMD's SSR
1971 * processing. We do not want to access the bam hardware during SSR
1972 * because a watchdog crash from a bus stall would likely occur.
1973 */
Jeff Hugo199294b2013-02-25 13:46:56 -07001974 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Arun Kumar Neelakantame7c0d622013-10-11 14:34:02 +05301975 BAM_DMUX_LOG("%s: begin\n", __func__);
Jeff Hugo199294b2013-02-25 13:46:56 -07001976 in_global_reset = 1;
Jeff Hugoa82a95c2012-12-14 17:56:19 -07001977 in_ssr = 1;
Arun Kumar Neelakantame7c0d622013-10-11 14:34:02 +05301978 /* wait till all bam_dmux writes completes */
1979 synchronize_srcu(&bam_dmux_srcu);
1980 BAM_DMUX_LOG("%s: ssr signaling complete\n", __func__);
Jeff Hugo199294b2013-02-25 13:46:56 -07001981 flush_workqueue(bam_mux_rx_workqueue);
1982 }
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001983 if (code != SUBSYS_AFTER_SHUTDOWN)
1984 return NOTIFY_DONE;
1985
Eric Holmberg454d9da2012-01-12 09:37:14 -07001986 /* Handle uplink Powerdown */
1987 write_lock_irqsave(&ul_wakeup_lock, flags);
1988 if (bam_is_connected) {
1989 ul_powerdown();
1990 wait_for_ack = 0;
1991 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001992 /*
1993 * if modem crash during ul_wakeup(), power_vote is 1, needs to be
1994 * reset to 0. harmless if bam_is_connected check above passes
1995 */
1996 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001997 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1998 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001999 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07002000 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugof62029d2012-07-17 13:39:53 -06002001 disconnect_ack = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07002002
2003 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06002004 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002005 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
2006 temp_remote_status = bam_ch_is_remote_open(i);
2007 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07002008 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002009 if (bam_ch_is_local_open(i))
2010 bam_ch[i].status |= BAM_CH_IN_RESET;
2011 if (temp_remote_status) {
2012 platform_device_unregister(bam_ch[i].pdev);
2013 bam_ch[i].pdev = platform_device_alloc(
2014 bam_ch[i].name, 2);
2015 }
2016 }
Eric Holmberga623da82012-07-12 09:37:09 -06002017 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07002018
2019 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07002020 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002021 while (!list_empty(&bam_tx_pool)) {
2022 node = bam_tx_pool.next;
2023 list_del(node);
2024 info = container_of(node, struct tx_pkt_info,
2025 list_node);
2026 if (!info->is_cmd) {
2027 dma_unmap_single(NULL, info->dma_address,
2028 info->skb->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06002029 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002030 dev_kfree_skb_any(info->skb);
2031 } else {
2032 dma_unmap_single(NULL, info->dma_address,
2033 info->len,
Brent Hronik89c96ba2013-08-27 14:34:22 -06002034 bam_ops->dma_to);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002035 kfree(info->skb);
2036 }
2037 kfree(info);
2038 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07002039 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07002040
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302041 BAM_DMUX_LOG("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002042 return NOTIFY_DONE;
2043}
2044
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002045static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046{
2047 u32 h;
2048 dma_addr_t dma_addr;
2049 int ret;
2050 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002051 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002053 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002054 /* init BAM */
Jeff Hugo3910ee12012-08-21 14:08:20 -06002055 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2056 a2_phys_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057 if (!a2_virt_addr) {
2058 pr_err("%s: ioremap failed\n", __func__);
2059 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07002060 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002061 }
Jeff Hugo3910ee12012-08-21 14:08:20 -06002062 a2_props.phys_addr = (u32)(a2_phys_base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002063 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002064 a2_props.virt_size = a2_phys_size;
2065 a2_props.irq = a2_bam_irq;
Jeff Hugo927cba62011-11-11 11:49:52 -07002066 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002067 a2_props.num_pipes = A2_NUM_PIPES;
2068 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07002069 if (cpu_is_msm9615())
2070 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002071 /* need to free on tear down */
Brent Hronik89c96ba2013-08-27 14:34:22 -06002072 ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002073 if (ret < 0) {
2074 pr_err("%s: register bam error %d\n", __func__, ret);
2075 goto register_bam_failed;
2076 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002077 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002078
Brent Hronik89c96ba2013-08-27 14:34:22 -06002079 bam_tx_pipe = bam_ops->sps_alloc_endpoint_ptr();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002080 if (bam_tx_pipe == NULL) {
2081 pr_err("%s: tx alloc endpoint failed\n", __func__);
2082 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002083 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084 }
Brent Hronik89c96ba2013-08-27 14:34:22 -06002085 ret = bam_ops->sps_get_config_ptr(bam_tx_pipe, &tx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002086 if (ret) {
2087 pr_err("%s: tx get config failed %d\n", __func__, ret);
2088 goto tx_get_config_failed;
2089 }
2090
2091 tx_connection.source = SPS_DEV_HANDLE_MEM;
2092 tx_connection.src_pipe_index = 0;
2093 tx_connection.destination = h;
2094 tx_connection.dest_pipe_index = 4;
2095 tx_connection.mode = SPS_MODE_DEST;
2096 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
2097 tx_desc_mem_buf.size = 0x800; /* 2k */
2098 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
2099 &dma_addr, 0);
2100 if (tx_desc_mem_buf.base == NULL) {
2101 pr_err("%s: tx memory alloc failed\n", __func__);
2102 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002103 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104 }
2105 tx_desc_mem_buf.phys_base = dma_addr;
2106 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
2107 tx_connection.desc = tx_desc_mem_buf;
2108 tx_connection.event_thresh = 0x10;
2109
Brent Hronik89c96ba2013-08-27 14:34:22 -06002110 ret = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002111 if (ret < 0) {
2112 pr_err("%s: tx connect error %d\n", __func__, ret);
2113 goto tx_connect_failed;
2114 }
2115
Brent Hronik89c96ba2013-08-27 14:34:22 -06002116 bam_rx_pipe = bam_ops->sps_alloc_endpoint_ptr();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117 if (bam_rx_pipe == NULL) {
2118 pr_err("%s: rx alloc endpoint failed\n", __func__);
2119 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002120 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121 }
Brent Hronik89c96ba2013-08-27 14:34:22 -06002122 ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &rx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123 if (ret) {
2124 pr_err("%s: rx get config failed %d\n", __func__, ret);
2125 goto rx_get_config_failed;
2126 }
2127
2128 rx_connection.source = h;
2129 rx_connection.src_pipe_index = 5;
2130 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2131 rx_connection.dest_pipe_index = 1;
2132 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002133 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2134 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002135 rx_desc_mem_buf.size = 0x800; /* 2k */
2136 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2137 &dma_addr, 0);
2138 if (rx_desc_mem_buf.base == NULL) {
2139 pr_err("%s: rx memory alloc failed\n", __func__);
2140 ret = -ENOMEM;
2141 goto rx_mem_failed;
2142 }
2143 rx_desc_mem_buf.phys_base = dma_addr;
2144 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2145 rx_connection.desc = rx_desc_mem_buf;
2146 rx_connection.event_thresh = 0x10;
2147
Brent Hronik89c96ba2013-08-27 14:34:22 -06002148 ret = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002149 if (ret < 0) {
2150 pr_err("%s: rx connect error %d\n", __func__, ret);
2151 goto rx_connect_failed;
2152 }
2153
2154 tx_register_event.options = SPS_O_EOT;
2155 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2156 tx_register_event.xfer_done = NULL;
2157 tx_register_event.callback = bam_mux_tx_notify;
2158 tx_register_event.user = NULL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002159 ret = bam_ops->sps_register_event_ptr(bam_tx_pipe, &tx_register_event);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002160 if (ret < 0) {
2161 pr_err("%s: tx register event error %d\n", __func__, ret);
2162 goto rx_event_reg_failed;
2163 }
2164
Jeff Hugo33dbc002011-08-25 15:52:53 -06002165 rx_register_event.options = SPS_O_EOT;
2166 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2167 rx_register_event.xfer_done = NULL;
2168 rx_register_event.callback = bam_mux_rx_notify;
2169 rx_register_event.user = NULL;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002170 ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
Jeff Hugo33dbc002011-08-25 15:52:53 -06002171 if (ret < 0) {
2172		pr_err("%s: rx register event error %d\n", __func__, ret);
2173 goto rx_event_reg_failed;
2174 }
2175
Jeff Hugoc2696142012-05-03 11:42:13 -06002176 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002177 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002178 if (need_delayed_ul_vote) {
2179 need_delayed_ul_vote = 0;
2180 msm_bam_dmux_kickoff_ul_wakeup();
2181 }
2182 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002183 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002184 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002185 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002186 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002187 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002188
2189rx_event_reg_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002190 bam_ops->sps_disconnect_ptr(bam_rx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002191rx_connect_failed:
2192 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2193 rx_desc_mem_buf.phys_base);
2194rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002195rx_get_config_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002196 bam_ops->sps_free_endpoint_ptr(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002197rx_alloc_endpoint_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002198 bam_ops->sps_disconnect_ptr(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199tx_connect_failed:
2200 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2201 tx_desc_mem_buf.phys_base);
2202tx_get_config_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002203 bam_ops->sps_free_endpoint_ptr(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002204tx_alloc_endpoint_failed:
Brent Hronik89c96ba2013-08-27 14:34:22 -06002205 bam_ops->sps_deregister_bam_device_ptr(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002206 /*
2207 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2208 * same handle below will cause a crash, so skip it if we've freed
2209 * the handle here.
2210 */
2211 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002212register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002213 if (!skip_iounmap)
2214 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002215ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002216 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002217 return ret;
2218}
2219
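/*
 * Minimal fallback used when full bam_init() fails: registers the BAM
 * device and completes the SMSM handshake, leaving the driver in
 * power-management-only mode with no data pipes.
 */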
2220static int bam_init_fallback(void)
2221{
2222 u32 h;
2223 int ret;
2224 void *a2_virt_addr;
2225
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002226 /* init BAM */
Jeff Hugo3910ee12012-08-21 14:08:20 -06002227 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2228 a2_phys_size);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002229 if (!a2_virt_addr) {
2230 pr_err("%s: ioremap failed\n", __func__);
2231 ret = -ENOMEM;
2232 goto ioremap_failed;
2233 }
Jeff Hugo3910ee12012-08-21 14:08:20 -06002234 a2_props.phys_addr = (u32)(a2_phys_base);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002235 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002236 a2_props.virt_size = a2_phys_size;
2237 a2_props.irq = a2_bam_irq;
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002238 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2239 a2_props.num_pipes = A2_NUM_PIPES;
2240 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2241 if (cpu_is_msm9615())
2242 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Brent Hronik89c96ba2013-08-27 14:34:22 -06002243 ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002244 if (ret < 0) {
2245 pr_err("%s: register bam error %d\n", __func__, ret);
2246 goto register_bam_failed;
2247 }
2248 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002249
2250 mutex_lock(&delayed_ul_vote_lock);
2251 bam_mux_initialized = 1;
2252 if (need_delayed_ul_vote) {
2253 need_delayed_ul_vote = 0;
2254 msm_bam_dmux_kickoff_ul_wakeup();
2255 }
2256 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002257 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002258
Jeff Hugo18792a32012-06-20 15:25:55 -06002259 power_management_only_mode = 1;
2260 bam_connection_is_active = 1;
2261 complete_all(&bam_connection_completion);
2262
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002263 return 0;
2264
2265register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002266 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002267ioremap_failed:
2268 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002269}
Jeff Hugoade1f842011-08-03 15:53:59 -06002270
Jeff Hugoa670b762012-03-15 15:58:28 -06002271static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002272{
2273 int ret = 0;
2274
2275 ret = bam_init();
2276 if (ret) {
2277 ret = bam_init_fallback();
2278 if (ret)
2279 pr_err("%s: bam init fallback failed: %d",
2280 __func__, ret);
2281 }
2282}
2283
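/*
 * Acknowledges an A2 power control transition; each call inverts
 * SMSM_A2_POWER_CONTROL_ACK so the remote side sees one edge per ack.
 */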
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002284static void toggle_apps_ack(void)
2285{
2286 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002287
Eric Holmberg7614a7f2013-07-29 15:47:12 -06002288 if (in_global_reset) {
2289 BAM_DMUX_LOG("%s: skipped due to SSR\n", __func__);
2290 return;
2291 }
2292
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302293 BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07002294 clear_bit & 0x1, ~clear_bit & 0x1);
Brent Hronik89c96ba2013-08-27 14:34:22 -06002295 bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002296 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2297 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2298 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002299 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002300}
2301
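/*
 * SMSM callback for A2 power control: drops duplicate notifications,
 * then reconnects, disconnects, or performs first-time BAM init based
 * on the new SMSM_A2_POWER_CONTROL state.
 */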
Jeff Hugoade1f842011-08-03 15:53:59 -06002302static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2303{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002304 static int last_processed_state;
2305
2306 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002307 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002308 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302309 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002310 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002311 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302312 BAM_DMUX_LOG("%s: already processed this state\n", __func__);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002313 mutex_unlock(&smsm_cb_lock);
2314 return;
2315 }
2316
2317 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002318
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002319 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302320 BAM_DMUX_LOG("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002321 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002322 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002323 } else if (bam_mux_initialized &&
2324 !(new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302325 BAM_DMUX_LOG("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002326 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002327 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002328 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302329 BAM_DMUX_LOG("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002330 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002331 if (cpu_is_msm9615())
2332 msm9615_bam_init();
2333 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002334 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002335 } else {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302336 BAM_DMUX_LOG("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002337 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002338 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002339 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002340
2341}
2342
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002343static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2344 uint32_t new_state)
2345{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002346 DBG_INC_ACK_IN_CNT();
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302347 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002348 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002349 complete_all(&ul_wakeup_ack_completion);
2350}
2351
Brent Hronik89c96ba2013-08-27 14:34:22 -06002352/**
2353 * msm_bam_dmux_set_bam_ops() - sets the bam_ops
2354 * @ops: bam_ops_if to set
2355 *
2356 * Sets bam_ops to allow switching of runtime behavior. Precondition: bam dmux
2357 * must be in an idle state. If input ops is NULL, then bam_ops will be
2358 * restored to their default state.
2359 */
2360void msm_bam_dmux_set_bam_ops(struct bam_ops_if *ops)
2361{
2362 if (ops != NULL)
2363 bam_ops = ops;
2364 else
2365 bam_ops = &bam_default_ops;
2366}
2367EXPORT_SYMBOL(msm_bam_dmux_set_bam_ops);
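
/*
 * Illustrative ops swap (sketch only; test_bam_ops is a hypothetical
 * caller-defined table, not part of this driver). Per the precondition
 * above, bam dmux must be idle across the swap:
 *
 *	static struct bam_ops_if test_bam_ops = { ... };
 *
 *	msm_bam_dmux_deinit();
 *	msm_bam_dmux_set_bam_ops(&test_bam_ops);
 *	msm_bam_dmux_reinit();
 */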
2368
2369/**
2370 * msm_bam_dmux_deinit() - puts bam dmux into a deinited state
2371 *
2372 * Puts bam dmux into a deinitialized state by simulating an ssr.
2373 */
2374void msm_bam_dmux_deinit(void)
2375{
2376 restart_notifier_cb(NULL, SUBSYS_BEFORE_SHUTDOWN, NULL);
2377 restart_notifier_cb(NULL, SUBSYS_AFTER_SHUTDOWN, NULL);
2378}
2379EXPORT_SYMBOL(msm_bam_dmux_deinit);
2380
2381/**
2382 * msm_bam_dmux_reinit() - reinitializes bam dmux
2383 */
2384void msm_bam_dmux_reinit(void)
2385{
2386 bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2387 SMSM_A2_POWER_CONTROL,
2388 bam_dmux_smsm_cb, NULL);
2389 bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2390 SMSM_A2_POWER_CONTROL_ACK,
2391 bam_dmux_smsm_ack_cb, NULL);
2392 bam_mux_initialized = 0;
2393 bam_init();
2394}
2395EXPORT_SYMBOL(msm_bam_dmux_reinit);
2396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002397static int bam_dmux_probe(struct platform_device *pdev)
2398{
2399 int rc;
Jeff Hugo3910ee12012-08-21 14:08:20 -06002400 struct resource *r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002401
2402 DBG("%s probe called\n", __func__);
2403 if (bam_mux_initialized)
2404 return 0;
2405
Jeff Hugo3910ee12012-08-21 14:08:20 -06002406 if (pdev->dev.of_node) {
2407 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2408 if (!r) {
2409 pr_err("%s: reg field missing\n", __func__);
2410 return -ENODEV;
2411 }
2412 a2_phys_base = (void *)(r->start);
2413 a2_phys_size = (uint32_t)(resource_size(r));
2414 a2_bam_irq = platform_get_irq(pdev, 0);
2415 if (a2_bam_irq == -ENXIO) {
2416 pr_err("%s: irq field missing\n", __func__);
2417 return -ENODEV;
2418 }
2419 DBG("%s: base:%p size:%x irq:%d\n", __func__,
2420 a2_phys_base,
2421 a2_phys_size,
2422 a2_bam_irq);
2423 } else { /* fallback to default init data */
2424 a2_phys_base = (void *)(A2_PHYS_BASE);
2425 a2_phys_size = A2_PHYS_SIZE;
2426 a2_bam_irq = A2_BAM_IRQ;
2427 }
2428
Stephen Boyd69d35e32012-02-14 15:33:30 -08002429 xo_clk = clk_get(&pdev->dev, "xo");
2430 if (IS_ERR(xo_clk)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302431 BAM_DMUX_LOG("%s: did not get xo clock\n", __func__);
Jeff Hugod0befde2012-08-09 15:32:49 -06002432 xo_clk = NULL;
Stephen Boyd69d35e32012-02-14 15:33:30 -08002433 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002434 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002435 if (IS_ERR(dfab_clk)) {
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302436 BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__);
Jeff Hugod0befde2012-08-09 15:32:49 -06002437 dfab_clk = NULL;
2438 } else {
2439 rc = clk_set_rate(dfab_clk, 64000000);
2440 if (rc)
2441 pr_err("%s: unable to set dfab clock rate\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002442 }
2443
Jeff Hugofff43af92012-03-29 17:54:52 -06002444 /*
2445	 * set up the workqueue so that it can be pinned to core 0 and not
2446 * block the watchdog pet function, so that netif_rx() in rmnet
2447 * only uses one queue.
2448 */
2449 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2450 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002451 if (!bam_mux_rx_workqueue)
2452 return -ENOMEM;
2453
2454 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2455 if (!bam_mux_tx_workqueue) {
2456 destroy_workqueue(bam_mux_rx_workqueue);
2457 return -ENOMEM;
2458 }
2459
Jeff Hugo7960abd2011-08-02 15:39:38 -06002460 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002461 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002462 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2463 "bam_dmux_ch_%d", rc);
2464 /* bus 2, ie a2 stream 2 */
2465 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2466 if (!bam_ch[rc].pdev) {
2467 pr_err("%s: platform device alloc failed\n", __func__);
2468 destroy_workqueue(bam_mux_rx_workqueue);
2469 destroy_workqueue(bam_mux_tx_workqueue);
2470 return -ENOMEM;
2471 }
2472 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002474 init_completion(&ul_wakeup_ack_completion);
2475 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002476 init_completion(&dfab_unvote_completion);
Brent Hronik096f7d32013-06-28 15:43:08 -06002477 init_completion(&shutdown_completion);
2478 complete_all(&shutdown_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002479 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugo988e7ba2012-10-03 15:53:54 -06002480 INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002481 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Arun Kumar Neelakantame7c0d622013-10-11 14:34:02 +05302482 init_srcu_struct(&bam_dmux_srcu);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002483
Brent Hronik89c96ba2013-08-27 14:34:22 -06002484 rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2485 SMSM_A2_POWER_CONTROL,
2486 bam_dmux_smsm_cb, NULL);
Jeff Hugoade1f842011-08-03 15:53:59 -06002487
2488 if (rc) {
2489 destroy_workqueue(bam_mux_rx_workqueue);
2490 destroy_workqueue(bam_mux_tx_workqueue);
2491 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2492 return -ENOMEM;
2493 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002494
Brent Hronik89c96ba2013-08-27 14:34:22 -06002495 rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
2496 SMSM_A2_POWER_CONTROL_ACK,
2497 bam_dmux_smsm_ack_cb, NULL);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002498
2499 if (rc) {
2500 destroy_workqueue(bam_mux_rx_workqueue);
2501 destroy_workqueue(bam_mux_tx_workqueue);
Brent Hronik89c96ba2013-08-27 14:34:22 -06002502 bam_ops->smsm_state_cb_deregister_ptr(SMSM_MODEM_STATE,
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002503 SMSM_A2_POWER_CONTROL,
2504 bam_dmux_smsm_cb, NULL);
2505 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2506 rc);
2507 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2508 platform_device_put(bam_ch[rc].pdev);
2509 return -ENOMEM;
2510 }
2511
Brent Hronik89c96ba2013-08-27 14:34:22 -06002512 if (bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE) &
2513 SMSM_A2_POWER_CONTROL)
2514 bam_dmux_smsm_cb(NULL, 0,
2515 bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE));
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002517 return 0;
2518}
2519
Jeff Hugo3910ee12012-08-21 14:08:20 -06002520static struct of_device_id msm_match_table[] = {
2521 {.compatible = "qcom,bam_dmux"},
2522 {},
2523};
2524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002525static struct platform_driver bam_dmux_driver = {
2526 .probe = bam_dmux_probe,
2527 .driver = {
2528 .name = "BAM_RMNT",
2529 .owner = THIS_MODULE,
Jeff Hugo3910ee12012-08-21 14:08:20 -06002530 .of_match_table = msm_match_table,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002531 },
2532};
2533
2534static int __init bam_dmux_init(void)
2535{
2536#ifdef CONFIG_DEBUG_FS
2537 struct dentry *dent;
2538
2539 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002540 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002541 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002542 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2543 debug_create("stats", 0444, dent, debug_stats);
2544 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002545#endif
Zaheerulla Meer6fbf32c2013-01-31 17:06:44 +05302546
2547 bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
2548 if (!bam_ipc_log_txt) {
2549		pr_err("%s: unable to create IPC Logging Context\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07002550 }
2551
Anurag Singhdcd8b4e2012-07-30 16:46:37 -07002552 rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
2553
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002554 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002555 return platform_driver_register(&bam_dmux_driver);
2556}
2557
Jeff Hugoade1f842011-08-03 15:53:59 -06002558late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002559MODULE_DESCRIPTION("MSM BAM DMUX");
2560MODULE_LICENSE("GPL v2");