/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <mach/msm_ipc_logging.h>
#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#include "bam_dmux_private.h"

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

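/*
 * Per-channel TX flow-control watermarks (counted in queued packets) and
 * bounds for the RX polling-timer sleep interval, which are tunable through
 * the module parameters below.
 */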
#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 2950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 3050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 1;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

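/*
 * Indirection table for the external SMSM and SPS calls made by this driver.
 * The default table points at the real APIs; all calls below go through
 * bam_ops, so the backing implementation can be substituted (presumably for
 * off-target testing) without touching the data path.
 */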
static struct bam_ops_if bam_default_ops = {
	/* smsm */
	.smsm_change_state_ptr = &smsm_change_state,
	.smsm_get_state_ptr = &smsm_get_state,
	.smsm_state_cb_register_ptr = &smsm_state_cb_register,
	.smsm_state_cb_deregister_ptr = &smsm_state_cb_deregister,

	/* sps */
	.sps_connect_ptr = &sps_connect,
	.sps_disconnect_ptr = &sps_disconnect,
	.sps_register_bam_device_ptr = &sps_register_bam_device,
	.sps_deregister_bam_device_ptr = &sps_deregister_bam_device,
	.sps_alloc_endpoint_ptr = &sps_alloc_endpoint,
	.sps_free_endpoint_ptr = &sps_free_endpoint,
	.sps_set_config_ptr = &sps_set_config,
	.sps_get_config_ptr = &sps_get_config,
	.sps_device_reset_ptr = &sps_device_reset,
	.sps_register_event_ptr = &sps_register_event,
	.sps_transfer_one_ptr = &sps_transfer_one,
	.sps_get_iovec_ptr = &sps_get_iovec,
	.sps_get_unused_desc_num_ptr = &sps_get_unused_desc_num,

	.dma_to = DMA_TO_DEVICE,
	.dma_from = DMA_FROM_DEVICE,
};
static struct bam_ops_if *bam_ops = &bam_default_ops;

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {				\
		if (msm_bam_dmux_debug_enable)	\
			pr_debug(x);		\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
		bam_dmux_tx_sps_failure_cnt++;	\
	} while (0)

#define DBG_INC_TX_STALL_CNT() do {		\
		bam_dmux_tx_stall_cnt++;	\
	} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

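/**
 * struct bam_ch_info - per-logical-channel state
 * @status:      bitmask of BAM_CH_LOCAL_OPEN, BAM_CH_REMOTE_OPEN and
 *               BAM_CH_IN_RESET
 * @notify:      client event callback (BAM_DMUX_RECEIVE, BAM_DMUX_WRITE_DONE,
 *               BAM_DMUX_UL_CONNECTED, ...)
 * @priv:        opaque client context passed back through @notify
 * @lock:        protects the fields of this structure
 * @pdev:        platform device registered when the remote side opens the
 *               channel
 * @name:        platform device name for this channel
 * @num_tx_pkts: writes queued but not yet completed, for watermark accounting
 * @use_wm:      non-zero once the client opts in to watermark flow control
 */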
struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define NUM_BUFFERS		32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;
static unsigned long long last_rx_pkt_timestamp;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK	0x1
#define SHUTDOWN_TIMEOUT_MS	500
#define UL_WAKEUP_TIMEOUT_MS	2000
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;
static int in_ssr;
static int ssr_skipped_disconnect;
static struct completion shutdown_completion;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

struct kfifo bam_dmux_state_log;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void *bam_ipc_log_txt;

#define BAM_IPC_LOG_PAGES 5

/**
 * Log a state change along with a small message.
 * Complete size of message is limited to @todo.
 * Logging is done using IPC Logging infrastructure.
 *
 * States
 * D: 1 = Power collapse disabled
 * R: 1 = in global reset
 * P: 1 = BAM is powered up
 * A: 1 = BAM initialized and ready for data
 * V: 1 = Uplink vote for power
 * U: 1 = Uplink active
 * W: 1 = Uplink Wait-for-ack
 * A: 1 = Uplink ACK received
 * #: >=1 On-demand uplink vote
 * D: 1 = Disconnect ACK active
 */

#define BAM_DMUX_LOG(fmt, args...) \
do { \
	if (bam_ipc_log_txt) { \
		ipc_log_string(bam_ipc_log_txt, \
			"<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
			a2_pc_disabled ? 'D' : 'd', \
			in_global_reset ? 'R' : 'r', \
			bam_dmux_power_state ? 'P' : 'p', \
			bam_connection_is_active ? 'A' : 'a', \
			bam_dmux_uplink_vote ? 'V' : 'v', \
			bam_is_connected ? 'U' : 'u', \
			wait_for_ack ? 'W' : 'w', \
			ul_wakeup_ack_completion.done ? 'A' : 'a', \
			atomic_read(&ul_ondemand_vote), \
			disconnect_ack ? 'D' : 'd', \
			args); \
	} \
} while (0)

#define DMUX_LOG_KERR(fmt, args...) \
do { \
	BAM_DMUX_LOG(fmt, args); \
	pr_err(fmt, args); \
} while (0)

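/*
 * Stamp a TX packet with the current sched_clock() time, split into whole
 * seconds (ts_sec) and the nanosecond remainder (ts_nsec), for the stall
 * diagnostics printed by verify_tx_queue_is_empty() and bam_mux_write_done().
 */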
static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			BAM_DMUX_LOG("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

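/*
 * Refill the RX descriptor pool up to NUM_BUFFERS entries. Each entry gets a
 * freshly allocated rx_pkt_info and skb, is DMA-mapped, and is handed to the
 * RX pipe through the sps_transfer_one hook. Allocations use GFP_NOWAIT; if
 * an allocation or transfer fails while the pool is empty, the refill is
 * retried from queue_rx_work about 100 ms later.
 */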
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (bam_connection_is_active && rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
				GFP_NOWAIT | __GFP_NOWARN);
		if (!info) {
			DMUX_LOG_KERR(
				"%s: unable to alloc rx_pkt_info, will retry later\n",
				__func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
				GFP_NOWAIT | __GFP_NOWARN);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
				__func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
						bam_ops->dma_from);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = bam_ops->sps_transfer_one_ptr(bam_rx_pipe,
				info->dma_address, BUFFER_SIZE, info, 0);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						bam_ops->dma_from);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
	}
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
}

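/*
 * Deliver a received data packet: strip the bam_mux_hdr from the front of the
 * skb, fix up the skb length fields, and hand the payload to the channel's
 * notify callback as a BAM_DMUX_RECEIVE event (or free it if no client is
 * listening), then queue a replacement RX buffer.
 */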
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

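/*
 * Handle an OPEN command from the remote side: mark the channel remote-open,
 * reset its TX packet count, and register the channel's platform device so
 * the client driver can probe. Skipped (apart from the RX refill) if a
 * subsystem restart is in progress.
 */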
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
			__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

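/*
 * Work function run for every buffer completed on the RX pipe. It unmaps the
 * buffer, validates the bam_mux_hdr magic number and logical channel ID, and
 * dispatches on the command: DATA goes to bam_mux_process_data(),
 * OPEN/OPEN_NO_A2_PC mark the channel open (the latter also disabling A2
 * power collapse handling), and CLOSE tears down the channel's platform
 * device. Anything else is logged and dropped.
 */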
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
			bam_ops->dma_from);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

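/*
 * Queue a MUX command buffer (e.g. an OPEN or CLOSE header) on the TX pipe.
 * The buffer is DMA-mapped, tracked in bam_tx_pool, and freed from
 * bam_mux_write_done() once the transfer completes. Callers in this file
 * hold ul_wakeup_lock for reading so the uplink stays awake for the write.
 */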
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					bam_ops->dma_to);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

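/*
 * TX completion work. Transfers are expected to complete in submission order,
 * so the completed packet must be the head of bam_tx_pool; any mismatch is
 * logged and treated as fatal (BUG()). Command buffers are simply freed,
 * while data writes decrement the channel's in-flight count and are returned
 * to the client through a BAM_DMUX_WRITE_DONE notification.
 */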
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

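/**
 * msm_bam_dmux_write() - queue an skb for transmission on a logical channel
 * @id:  logical channel ID
 * @skb: packet to transmit; handed back through the client's
 *       BAM_DMUX_WRITE_DONE notification (possibly as a re-allocated copy
 *       if padding required more tailroom)
 *
 * The packet is padded to a 4-byte boundary, prefixed with a bam_mux_hdr,
 * DMA-mapped and queued on the TX pipe, waking the uplink first if needed.
 * Returns 0 on success, -EINVAL for bad arguments, -ENODEV if the driver or
 * channel is not ready, -EAGAIN when watermark flow control is active and
 * the channel is full, -EFAULT if a subsystem restart interrupted the uplink
 * wakeup, and -ENOMEM on allocation or DMA-mapping failure.
 */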
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					bam_ops->dma_to);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, bam_ops->dma_to);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	skb_pull(skb, sizeof(struct bam_mux_hdr));
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

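/**
 * msm_bam_dmux_open() - open a logical channel from the local (apps) side
 * @id:     logical channel ID, less than BAM_DMUX_NUM_CHANNELS
 * @priv:   opaque context passed back to @notify
 * @notify: required event callback (BAM_DMUX_RECEIVE, BAM_DMUX_WRITE_DONE,
 *          BAM_DMUX_UL_CONNECTED, ...)
 *
 * The channel must already be remote-open (an OPEN command must have been
 * received for it), otherwise -ENODEV is returned. On success an OPEN
 * command is sent to the remote side and the result of queueing that
 * command is returned (0 on success).
 *
 * Illustrative sketch only; the client callback and names below are
 * hypothetical, not part of this driver:
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_RECEIVE)
 *			my_handle_skb((struct sk_buff *)data);
 *	}
 *	...
 *	rc = msm_bam_dmux_open(my_ch_id, my_ctx, my_notify);
 *	if (!rc)
 *		rc = msm_bam_dmux_write(my_ch_id, skb);
 */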
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1)) {
			kfree(hdr);
			return -EFAULT;
		}
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

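/**
 * msm_bam_dmux_close() - close the local side of a logical channel
 * @id: logical channel ID
 *
 * Clears the client callback and local-open state. If the channel is not in
 * reset, a CLOSE command is also sent to the remote side (waking the uplink
 * first if necessary); during a reset the IN_RESET flag is simply cleared
 * and 0 is returned.
 */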
| 935 | int msm_bam_dmux_close(uint32_t id) |
| 936 | { |
| 937 | struct bam_mux_hdr *hdr; |
| 938 | unsigned long flags; |
| 939 | int rc; |
| 940 | |
| 941 | if (id >= BAM_DMUX_NUM_CHANNELS) |
| 942 | return -EINVAL; |
| 943 | DBG("%s: closing ch %d\n", __func__, id); |
| 944 | if (!bam_mux_initialized) |
| 945 | return -ENODEV; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 946 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 947 | read_lock(&ul_wakeup_lock); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 948 | if (!bam_is_connected && !bam_ch_is_in_reset(id)) { |
Jeff Hugo | 061ce67 | 2011-10-21 17:15:32 -0600 | [diff] [blame] | 949 | read_unlock(&ul_wakeup_lock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 950 | ul_wakeup(); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 951 | if (unlikely(in_global_reset == 1)) |
| 952 | return -EFAULT; |
Jeff Hugo | 061ce67 | 2011-10-21 17:15:32 -0600 | [diff] [blame] | 953 | read_lock(&ul_wakeup_lock); |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 954 | notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL)); |
Jeff Hugo | 061ce67 | 2011-10-21 17:15:32 -0600 | [diff] [blame] | 955 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 956 | |
Jeff Hugo | 061ce67 | 2011-10-21 17:15:32 -0600 | [diff] [blame] | 957 | spin_lock_irqsave(&bam_ch[id].lock, flags); |
Jeff Hugo | 1c4531c | 2011-08-02 14:55:37 -0600 | [diff] [blame] | 958 | bam_ch[id].notify = NULL; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 959 | bam_ch[id].priv = NULL; |
| 960 | bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN; |
| 961 | spin_unlock_irqrestore(&bam_ch[id].lock, flags); |
| 962 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 963 | if (bam_ch_is_in_reset(id)) { |
| 964 | read_unlock(&ul_wakeup_lock); |
| 965 | bam_ch[id].status &= ~BAM_CH_IN_RESET; |
| 966 | return 0; |
| 967 | } |
| 968 | |
Jeff Hugo | bb5802f | 2011-11-02 17:10:29 -0600 | [diff] [blame] | 969 | hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 970 | if (hdr == NULL) { |
| 971 | pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id); |
Jeff Hugo | c6af54d | 2011-11-02 17:00:27 -0600 | [diff] [blame] | 972 | read_unlock(&ul_wakeup_lock); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 973 | return -ENOMEM; |
| 974 | } |
| 975 | hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; |
| 976 | hdr->cmd = BAM_MUX_HDR_CMD_CLOSE; |
| 977 | hdr->reserved = 0; |
| 978 | hdr->ch_id = id; |
| 979 | hdr->pkt_len = 0; |
| 980 | hdr->pad_len = 0; |
| 981 | |
| 982 | rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr)); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 983 | read_unlock(&ul_wakeup_lock); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 984 | |
| 985 | DBG("%s: closed ch %d\n", __func__, id); |
| 986 | return rc; |
| 987 | } |
| 988 | |
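/*
 * Usage sketch (illustrative only, not compiled): a hypothetical client
 * opens a channel with a notify callback and later closes it. The names
 * MY_CH_ID, my_priv, my_skb and the msm_bam_dmux_write() call are
 * assumptions for illustration; the open/close calls mirror the
 * functions above.
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			pr_debug("uplink ready\n");
 *	}
 *
 *	rc = msm_bam_dmux_open(MY_CH_ID, my_priv, my_notify);
 *	if (!rc)
 *		rc = msm_bam_dmux_write(MY_CH_ID, my_skb);
 *	msm_bam_dmux_close(MY_CH_ID);
 */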
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 989 | int msm_bam_dmux_is_ch_full(uint32_t id) |
| 990 | { |
| 991 | unsigned long flags; |
| 992 | int ret; |
| 993 | |
| 994 | if (id >= BAM_DMUX_NUM_CHANNELS) |
| 995 | return -EINVAL; |
| 996 | |
| 997 | spin_lock_irqsave(&bam_ch[id].lock, flags); |
| 998 | bam_ch[id].use_wm = 1; |
| 999 | ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK; |
| 1000 | DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__, |
| 1001 | id, bam_ch[id].num_tx_pkts, ret); |
| 1002 | if (!bam_ch_is_local_open(id)) { |
| 1003 | ret = -ENODEV; |
| 1004 | pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status); |
| 1005 | } |
| 1006 | spin_unlock_irqrestore(&bam_ch[id].lock, flags); |
| 1007 | |
| 1008 | return ret; |
| 1009 | } |
| 1010 | |
| 1011 | int msm_bam_dmux_is_ch_low(uint32_t id) |
| 1012 | { |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1013 | unsigned long flags; |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1014 | int ret; |
| 1015 | |
| 1016 | if (id >= BAM_DMUX_NUM_CHANNELS) |
| 1017 | return -EINVAL; |
| 1018 | |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1019 | spin_lock_irqsave(&bam_ch[id].lock, flags); |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1020 | bam_ch[id].use_wm = 1; |
| 1021 | ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK; |
| 1022 | DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__, |
| 1023 | id, bam_ch[id].num_tx_pkts, ret); |
| 1024 | if (!bam_ch_is_local_open(id)) { |
| 1025 | ret = -ENODEV; |
| 1026 | pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status); |
| 1027 | } |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1028 | spin_unlock_irqrestore(&bam_ch[id].lock, flags); |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1029 | |
| 1030 | return ret; |
| 1031 | } |
| 1032 | |
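/*
 * Flow-control sketch (illustrative only, not compiled): a hypothetical
 * client throttles its transmit path once the channel reports full and
 * resumes after it drains to the low watermark. Both helpers return a
 * negative errno for an invalid or closed channel, so test for > 0. The
 * tx_stopped flag and my_resume_tx() are assumptions for illustration.
 *
 *	if (msm_bam_dmux_is_ch_full(MY_CH_ID) > 0)
 *		tx_stopped = true;
 *
 *	if (tx_stopped && msm_bam_dmux_is_ch_low(MY_CH_ID) > 0) {
 *		tx_stopped = false;
 *		my_resume_tx();
 *	}
 */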
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1033 | static void rx_switch_to_interrupt_mode(void) |
| 1034 | { |
| 1035 | struct sps_connect cur_rx_conn; |
| 1036 | struct sps_iovec iov; |
| 1037 | struct rx_pkt_info *info; |
| 1038 | int ret; |
| 1039 | |
| 1040 | /* |
| 1041 | * Attempt to enable interrupts - if this fails, |
| 1042 | * continue polling and we will retry later. |
| 1043 | */ |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1044 | ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &cur_rx_conn); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1045 | if (ret) { |
| 1046 | pr_err("%s: sps_get_config() failed %d\n", __func__, ret); |
| 1047 | goto fail; |
| 1048 | } |
| 1049 | |
| 1050 | rx_register_event.options = SPS_O_EOT; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1051 | ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1052 | if (ret) { |
| 1053 | pr_err("%s: sps_register_event() failed %d\n", __func__, ret); |
| 1054 | goto fail; |
| 1055 | } |
| 1056 | |
| 1057 | cur_rx_conn.options = SPS_O_AUTO_ENABLE | |
| 1058 | SPS_O_EOT | SPS_O_ACK_TRANSFERS; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1059 | ret = bam_ops->sps_set_config_ptr(bam_rx_pipe, &cur_rx_conn); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1060 | if (ret) { |
| 1061 | pr_err("%s: sps_set_config() failed %d\n", __func__, ret); |
| 1062 | goto fail; |
| 1063 | } |
| 1064 | polling_mode = 0; |
Brent Hronik | 096f7d3 | 2013-06-28 15:43:08 -0600 | [diff] [blame] | 1065 | complete_all(&shutdown_completion); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1066 | release_wakelock(); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1067 | |
| 1068 | /* handle any rx packets before interrupt was enabled */ |
| 1069 | while (bam_connection_is_active && !polling_mode) { |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1070 | ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1071 | if (ret) { |
| 1072 | pr_err("%s: sps_get_iovec failed %d\n", |
| 1073 | __func__, ret); |
| 1074 | break; |
| 1075 | } |
| 1076 | if (iov.addr == 0) |
| 1077 | break; |
| 1078 | |
| 1079 | mutex_lock(&bam_rx_pool_mutexlock); |
| 1080 | if (unlikely(list_empty(&bam_rx_pool))) { |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1081 | DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n", |
| 1082 | __func__, (void *)iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1083 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1084 | continue; |
| 1085 | } |
| 1086 | info = list_first_entry(&bam_rx_pool, struct rx_pkt_info, |
| 1087 | list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1088 | if (info->dma_address != iov.addr) { |
| 1089 | DMUX_LOG_KERR("%s: iovec %p != dma %p\n", |
| 1090 | __func__, |
| 1091 | (void *)iov.addr, |
| 1092 | (void *)info->dma_address); |
| 1093 | list_for_each_entry(info, &bam_rx_pool, list_node) { |
| 1094 | DMUX_LOG_KERR("%s: dma %p\n", __func__, |
| 1095 | (void *)info->dma_address); |
| 1096 | if (iov.addr == info->dma_address) |
| 1097 | break; |
| 1098 | } |
| 1099 | } |
| 1100 | BUG_ON(info->dma_address != iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1101 | list_del(&info->list_node); |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1102 | --bam_rx_pool_len; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1103 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1104 | handle_bam_mux_cmd(&info->work); |
| 1105 | } |
| 1106 | return; |
| 1107 | |
| 1108 | fail: |
| 1109 | pr_err("%s: reverting to polling\n", __func__); |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 1110 | queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1111 | } |
| 1112 | |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1113 | /** |
 | 1114 |  * store_rx_timestamp() - store the current raw time as a timestamp for when 
| 1115 | * the last rx packet was processed |
| 1116 | */ |
| 1117 | static void store_rx_timestamp(void) |
| 1118 | { |
| 1119 | last_rx_pkt_timestamp = sched_clock(); |
| 1120 | } |
| 1121 | |
| 1122 | /** |
| 1123 | * log_rx_timestamp() - Log the stored rx pkt timestamp in a human readable |
| 1124 | * format |
| 1125 | */ |
| 1126 | static void log_rx_timestamp(void) |
| 1127 | { |
| 1128 | unsigned long long t = last_rx_pkt_timestamp; |
| 1129 | unsigned long nanosec_rem; |
| 1130 | |
| 1131 | nanosec_rem = do_div(t, 1000000000U); |
| 1132 | BAM_DMUX_LOG("Last rx pkt processed at [%6u.%09lu]\n", (unsigned)t, |
| 1133 | nanosec_rem); |
| 1134 | } |
| 1135 | |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1136 | static void rx_timer_work_func(struct work_struct *work) |
| 1137 | { |
| 1138 | struct sps_iovec iov; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1139 | struct rx_pkt_info *info; |
| 1140 | int inactive_cycles = 0; |
| 1141 | int ret; |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 1142 | u32 buffs_unused, buffs_used; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1143 | |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1144 | BAM_DMUX_LOG("%s: polling start\n", __func__); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1145 | while (bam_connection_is_active) { /* timer loop */ |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1146 | ++inactive_cycles; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1147 | while (bam_connection_is_active) { /* deplete queue loop */ |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1148 | if (in_global_reset) { |
| 1149 | BAM_DMUX_LOG( |
| 1150 | "%s: polling exit, global reset detected\n", |
| 1151 | __func__); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1152 | return; |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1153 | } |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1154 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1155 | ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1156 | if (ret) { |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1157 | DMUX_LOG_KERR("%s: sps_get_iovec failed %d\n", |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1158 | __func__, ret); |
| 1159 | break; |
| 1160 | } |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1161 | if (iov.addr == 0) |
| 1162 | break; |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1163 | store_rx_timestamp(); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1164 | inactive_cycles = 0; |
Jeff Hugo | c974993 | 2011-11-02 17:50:40 -0600 | [diff] [blame] | 1165 | mutex_lock(&bam_rx_pool_mutexlock); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1166 | if (unlikely(list_empty(&bam_rx_pool))) { |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1167 | DMUX_LOG_KERR( |
| 1168 | "%s: have iovec %p but rx pool empty\n", |
| 1169 | __func__, (void *)iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1170 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1171 | continue; |
| 1172 | } |
| 1173 | info = list_first_entry(&bam_rx_pool, |
| 1174 | struct rx_pkt_info, list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1175 | if (info->dma_address != iov.addr) { |
| 1176 | DMUX_LOG_KERR("%s: iovec %p != dma %p\n", |
| 1177 | __func__, |
| 1178 | (void *)iov.addr, |
| 1179 | (void *)info->dma_address); |
| 1180 | list_for_each_entry(info, &bam_rx_pool, |
| 1181 | list_node) { |
| 1182 | DMUX_LOG_KERR("%s: dma %p\n", __func__, |
| 1183 | (void *)info->dma_address); |
| 1184 | if (iov.addr == info->dma_address) |
| 1185 | break; |
| 1186 | } |
| 1187 | } |
| 1188 | BUG_ON(info->dma_address != iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1189 | list_del(&info->list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1190 | --bam_rx_pool_len; |
Jeff Hugo | c974993 | 2011-11-02 17:50:40 -0600 | [diff] [blame] | 1191 | mutex_unlock(&bam_rx_pool_mutexlock); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1192 | handle_bam_mux_cmd(&info->work); |
| 1193 | } |
| 1194 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 1195 | if (inactive_cycles >= POLLING_INACTIVITY) { |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1196 | BAM_DMUX_LOG("%s: polling exit, no data\n", __func__); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1197 | rx_switch_to_interrupt_mode(); |
| 1198 | break; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1199 | } |
| 1200 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 1201 | if (bam_adaptive_timer_enabled) { |
| 1202 | usleep_range(rx_timer_interval, rx_timer_interval + 50); |
| 1203 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1204 | ret = bam_ops->sps_get_unused_desc_num_ptr(bam_rx_pipe, |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 1205 | &buffs_unused); |
| 1206 | |
| 1207 | if (ret) { |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1208 | DMUX_LOG_KERR( |
| 1209 | "%s: error getting num buffers unused after sleep\n", |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 1210 | __func__); |
| 1211 | |
| 1212 | break; |
| 1213 | } |
| 1214 | |
| 1215 | buffs_used = NUM_BUFFERS - buffs_unused; |
| 1216 | |
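			/*
			 * Adapt the next sleep to pipe occupancy: if no
			 * descriptors are free, poll at the minimum interval;
			 * otherwise scale the previous interval by roughly
			 * (2/3) * NUM_BUFFERS / buffs_used, so it shrinks as
			 * the pipe fills and grows as it drains, then clamp
			 * it to [MIN_POLLING_SLEEP, MAX_POLLING_SLEEP].
			 */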
| 1217 | if (buffs_unused == 0) { |
| 1218 | rx_timer_interval = MIN_POLLING_SLEEP; |
| 1219 | } else { |
| 1220 | if (buffs_used > 0) { |
| 1221 | rx_timer_interval = |
| 1222 | (2 * NUM_BUFFERS * |
| 1223 | rx_timer_interval)/ |
| 1224 | (3 * buffs_used); |
| 1225 | } else { |
| 1226 | rx_timer_interval = |
| 1227 | MAX_POLLING_SLEEP; |
| 1228 | } |
| 1229 | } |
| 1230 | |
| 1231 | if (rx_timer_interval > MAX_POLLING_SLEEP) |
| 1232 | rx_timer_interval = MAX_POLLING_SLEEP; |
| 1233 | else if (rx_timer_interval < MIN_POLLING_SLEEP) |
| 1234 | rx_timer_interval = MIN_POLLING_SLEEP; |
| 1235 | } else { |
| 1236 | usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP); |
| 1237 | } |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1238 | } |
| 1239 | } |
| 1240 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1241 | static void bam_mux_tx_notify(struct sps_event_notify *notify) |
| 1242 | { |
| 1243 | struct tx_pkt_info *pkt; |
| 1244 | |
| 1245 | DBG("%s: event %d notified\n", __func__, notify->event_id); |
| 1246 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1247 | if (in_global_reset) |
| 1248 | return; |
| 1249 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1250 | switch (notify->event_id) { |
| 1251 | case SPS_EVENT_EOT: |
| 1252 | pkt = notify->data.transfer.user; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1253 | if (!pkt->is_cmd) |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1254 | dma_unmap_single(NULL, pkt->dma_address, |
| 1255 | pkt->skb->len, |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1256 | bam_ops->dma_to); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1257 | else |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1258 | dma_unmap_single(NULL, pkt->dma_address, |
| 1259 | pkt->len, |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1260 | bam_ops->dma_to); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1261 | queue_work(bam_mux_tx_workqueue, &pkt->work); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1262 | break; |
| 1263 | default: |
 | 1264 |  pr_err("%s: received unexpected event id %d\n", __func__, 
| 1265 | notify->event_id); |
| 1266 | } |
| 1267 | } |
| 1268 | |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1269 | static void bam_mux_rx_notify(struct sps_event_notify *notify) |
| 1270 | { |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1271 | int ret; |
| 1272 | struct sps_connect cur_rx_conn; |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1273 | |
| 1274 | DBG("%s: event %d notified\n", __func__, notify->event_id); |
| 1275 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1276 | if (in_global_reset) |
| 1277 | return; |
| 1278 | |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1279 | switch (notify->event_id) { |
| 1280 | case SPS_EVENT_EOT: |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1281 | /* attempt to disable interrupts in this pipe */ |
| 1282 | if (!polling_mode) { |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1283 | ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, |
| 1284 | &cur_rx_conn); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1285 | if (ret) { |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1286 |  pr_err("%s: sps_get_config() failed %d, interrupts not disabled\n", 
 | 1287 |  __func__, ret); 
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1288 | break; |
| 1289 | } |
Jeff Hugo | a9d32ba | 2011-11-21 14:59:48 -0700 | [diff] [blame] | 1290 | cur_rx_conn.options = SPS_O_AUTO_ENABLE | |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1291 | SPS_O_ACK_TRANSFERS | SPS_O_POLL; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1292 | ret = bam_ops->sps_set_config_ptr(bam_rx_pipe, |
| 1293 | &cur_rx_conn); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1294 | if (ret) { |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1295 |  pr_err("%s: sps_set_config() failed %d, interrupts not disabled\n", 
 | 1296 |  __func__, ret); 
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1297 | break; |
| 1298 | } |
Brent Hronik | 096f7d3 | 2013-06-28 15:43:08 -0600 | [diff] [blame] | 1299 | INIT_COMPLETION(shutdown_completion); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1300 | grab_wakelock(); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1301 | polling_mode = 1; |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 1302 | /* |
| 1303 | * run on core 0 so that netif_rx() in rmnet uses only |
| 1304 | * one queue |
| 1305 | */ |
| 1306 | queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1307 | } |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1308 | break; |
| 1309 | default: |
 | 1310 |  pr_err("%s: received unexpected event id %d\n", __func__, 
| 1311 | notify->event_id); |
| 1312 | } |
| 1313 | } |
| 1314 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1315 | #ifdef CONFIG_DEBUG_FS |
| 1316 | |
| 1317 | static int debug_tbl(char *buf, int max) |
| 1318 | { |
| 1319 | int i = 0; |
| 1320 | int j; |
| 1321 | |
| 1322 | for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) { |
| 1323 | i += scnprintf(buf + i, max - i, |
| 1324 | "ch%02d local open=%s remote open=%s\n", |
| 1325 | j, bam_ch_is_local_open(j) ? "Y" : "N", |
| 1326 | bam_ch_is_remote_open(j) ? "Y" : "N"); |
| 1327 | } |
| 1328 | |
| 1329 | return i; |
| 1330 | } |
| 1331 | |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1332 | static int debug_ul_pkt_cnt(char *buf, int max) |
| 1333 | { |
| 1334 | struct list_head *p; |
| 1335 | unsigned long flags; |
| 1336 | int n = 0; |
| 1337 | |
| 1338 | spin_lock_irqsave(&bam_tx_pool_spinlock, flags); |
| 1339 | __list_for_each(p, &bam_tx_pool) { |
| 1340 | ++n; |
| 1341 | } |
| 1342 | spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags); |
| 1343 | |
| 1344 | return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n); |
| 1345 | } |
| 1346 | |
| 1347 | static int debug_stats(char *buf, int max) |
| 1348 | { |
| 1349 | int i = 0; |
| 1350 | |
| 1351 | i += scnprintf(buf + i, max - i, |
Eric Holmberg | 9fdef26 | 2012-02-14 11:46:05 -0700 | [diff] [blame] | 1352 | "skb read cnt: %u\n" |
| 1353 | "skb write cnt: %u\n" |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1354 | "skb copy cnt: %u\n" |
| 1355 | "skb copy bytes: %u\n" |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1356 | "sps tx failures: %u\n" |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1357 | "sps tx stalls: %u\n" |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 1358 | "rx queue len: %d\n" |
| 1359 | "a2 ack out cnt: %d\n" |
| 1360 | "a2 ack in cnt: %d\n" |
| 1361 | "a2 pwr cntl in: %d\n", |
Eric Holmberg | 9fdef26 | 2012-02-14 11:46:05 -0700 | [diff] [blame] | 1362 | bam_dmux_read_cnt, |
| 1363 | bam_dmux_write_cnt, |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1364 | bam_dmux_write_cpy_cnt, |
| 1365 | bam_dmux_write_cpy_bytes, |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1366 | bam_dmux_tx_sps_failure_cnt, |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1367 | bam_dmux_tx_stall_cnt, |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 1368 | bam_rx_pool_len, |
| 1369 | atomic_read(&bam_dmux_ack_out_cnt), |
| 1370 | atomic_read(&bam_dmux_ack_in_cnt), |
| 1371 | atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt) |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1372 | ); |
| 1373 | |
| 1374 | return i; |
| 1375 | } |
| 1376 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1377 | #define DEBUG_BUFMAX 4096 |
| 1378 | static char debug_buffer[DEBUG_BUFMAX]; |
| 1379 | |
| 1380 | static ssize_t debug_read(struct file *file, char __user *buf, |
| 1381 | size_t count, loff_t *ppos) |
| 1382 | { |
| 1383 | int (*fill)(char *buf, int max) = file->private_data; |
| 1384 | int bsize = fill(debug_buffer, DEBUG_BUFMAX); |
| 1385 | return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize); |
| 1386 | } |
| 1387 | |
| 1388 | static int debug_open(struct inode *inode, struct file *file) |
| 1389 | { |
| 1390 | file->private_data = inode->i_private; |
| 1391 | return 0; |
| 1392 | } |
| 1393 | |
| 1394 | |
| 1395 | static const struct file_operations debug_ops = { |
| 1396 | .read = debug_read, |
| 1397 | .open = debug_open, |
| 1398 | }; |
| 1399 | |
| 1400 | static void debug_create(const char *name, mode_t mode, |
| 1401 | struct dentry *dent, |
| 1402 | int (*fill)(char *buf, int max)) |
| 1403 | { |
Eric Holmberg | e4ac80b | 2012-01-12 09:21:59 -0700 | [diff] [blame] | 1404 | struct dentry *file; |
| 1405 | |
| 1406 | file = debugfs_create_file(name, mode, dent, fill, &debug_ops); |
| 1407 | if (IS_ERR(file)) |
| 1408 | pr_err("%s: debugfs create failed %d\n", __func__, |
| 1409 | (int)PTR_ERR(file)); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1410 | } |
| 1411 | |
| 1412 | #endif |
| 1413 | |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1414 | static void notify_all(int event, unsigned long data) |
| 1415 | { |
| 1416 | int i; |
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1417 | struct list_head *temp; |
| 1418 | struct outside_notify_func *func; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1419 | |
Jeff Hugo | ac8152a | 2013-04-19 11:05:19 -0600 | [diff] [blame] | 1420 | BAM_DMUX_LOG("%s: event=%d, data=%lu\n", __func__, event, data); |
| 1421 | |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1422 | for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) { |
Jeff Hugo | ac8152a | 2013-04-19 11:05:19 -0600 | [diff] [blame] | 1423 | if (bam_ch_is_open(i)) |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1424 | bam_ch[i].notify(bam_ch[i].priv, event, data); |
| 1425 | } |
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1426 | |
| 1427 | __list_for_each(temp, &bam_other_notify_funcs) { |
| 1428 | func = container_of(temp, struct outside_notify_func, |
| 1429 | list_node); |
| 1430 | func->notify(func->priv, event, data); |
| 1431 | } |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1432 | } |
| 1433 | |
| 1434 | static void kickoff_ul_wakeup_func(struct work_struct *work) |
| 1435 | { |
| 1436 | read_lock(&ul_wakeup_lock); |
| 1437 | if (!bam_is_connected) { |
| 1438 | read_unlock(&ul_wakeup_lock); |
| 1439 | ul_wakeup(); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1440 | if (unlikely(in_global_reset == 1)) |
| 1441 | return; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1442 | read_lock(&ul_wakeup_lock); |
| 1443 | ul_packet_written = 1; |
| 1444 | notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL)); |
| 1445 | } |
| 1446 | read_unlock(&ul_wakeup_lock); |
| 1447 | } |
| 1448 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1449 | int msm_bam_dmux_kickoff_ul_wakeup(void) |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1450 | { |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1451 | int is_connected; |
| 1452 | |
| 1453 | read_lock(&ul_wakeup_lock); |
| 1454 | ul_packet_written = 1; |
| 1455 | is_connected = bam_is_connected; |
| 1456 | if (!is_connected) |
| 1457 | queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup); |
| 1458 | read_unlock(&ul_wakeup_lock); |
| 1459 | |
| 1460 | return is_connected; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1461 | } |
| 1462 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1463 | static void power_vote(int vote) |
| 1464 | { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1465 | BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__, |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1466 | bam_dmux_uplink_vote, vote); |
| 1467 | |
| 1468 | if (bam_dmux_uplink_vote == vote) |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1469 | BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1470 | |
| 1471 | bam_dmux_uplink_vote = vote; |
| 1472 | if (vote) |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1473 | bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE, |
| 1474 | 0, SMSM_A2_POWER_CONTROL); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1475 | else |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1476 | bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE, |
| 1477 | SMSM_A2_POWER_CONTROL, 0); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1478 | } |
| 1479 | |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1480 | /* |
| 1481 | * @note: Must be called with ul_wakeup_lock locked. |
| 1482 | */ |
| 1483 | static inline void ul_powerdown(void) |
| 1484 | { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1485 | BAM_DMUX_LOG("%s: powerdown\n", __func__); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1486 | verify_tx_queue_is_empty(__func__); |
| 1487 | |
| 1488 | if (a2_pc_disabled) { |
| 1489 | wait_for_dfab = 1; |
| 1490 | INIT_COMPLETION(dfab_unvote_completion); |
| 1491 | release_wakelock(); |
| 1492 | } else { |
| 1493 | wait_for_ack = 1; |
| 1494 | INIT_COMPLETION(ul_wakeup_ack_completion); |
| 1495 | power_vote(0); |
| 1496 | } |
| 1497 | bam_is_connected = 0; |
| 1498 | notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL)); |
| 1499 | } |
| 1500 | |
| 1501 | static inline void ul_powerdown_finish(void) |
| 1502 | { |
| 1503 | if (a2_pc_disabled && wait_for_dfab) { |
| 1504 | unvote_dfab(); |
| 1505 | complete_all(&dfab_unvote_completion); |
| 1506 | wait_for_dfab = 0; |
| 1507 | } |
| 1508 | } |
| 1509 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1510 | /* |
| 1511 | * Votes for UL power and returns current power state. |
| 1512 | * |
| 1513 | * @returns true if currently connected |
| 1514 | */ |
| 1515 | int msm_bam_dmux_ul_power_vote(void) |
| 1516 | { |
| 1517 | int is_connected; |
| 1518 | |
| 1519 | read_lock(&ul_wakeup_lock); |
| 1520 | atomic_inc(&ul_ondemand_vote); |
| 1521 | is_connected = bam_is_connected; |
| 1522 | if (!is_connected) |
| 1523 | queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup); |
| 1524 | read_unlock(&ul_wakeup_lock); |
| 1525 | |
| 1526 | return is_connected; |
| 1527 | } |
| 1528 | |
| 1529 | /* |
| 1530 | * Unvotes for UL power. |
| 1531 | * |
| 1532 | * @returns true if vote count is 0 (UL shutdown possible) |
| 1533 | */ |
| 1534 | int msm_bam_dmux_ul_power_unvote(void) |
| 1535 | { |
| 1536 | int vote; |
| 1537 | |
| 1538 | read_lock(&ul_wakeup_lock); |
| 1539 | vote = atomic_dec_return(&ul_ondemand_vote); |
 | 1540 |  if (unlikely(vote < 0)) 
| 1541 | DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote); |
| 1542 | read_unlock(&ul_wakeup_lock); |
| 1543 | |
| 1544 | return vote == 0; |
| 1545 | } |
| 1546 | |
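/*
 * Usage sketch (illustrative only, not compiled): votes are meant to be
 * balanced; the warning in msm_bam_dmux_ul_power_unvote() fires if the
 * count goes negative. my_ul_connected (a completion signalled by the
 * client's notify callback on BAM_DMUX_UL_CONNECTED) and my_transmit()
 * are assumptions for illustration.
 *
 *	if (!msm_bam_dmux_ul_power_vote())
 *		wait_for_completion(&my_ul_connected);
 *	my_transmit();
 *	msm_bam_dmux_ul_power_unvote();
 */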
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1547 | int msm_bam_dmux_reg_notify(void *priv, |
| 1548 | void (*notify)(void *priv, int event_type, |
| 1549 | unsigned long data)) |
| 1550 | { |
| 1551 | struct outside_notify_func *func; |
| 1552 | |
| 1553 | if (!notify) |
| 1554 | return -EINVAL; |
| 1555 | |
| 1556 | func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL); |
| 1557 | if (!func) |
| 1558 | return -ENOMEM; |
| 1559 | |
| 1560 | func->notify = notify; |
| 1561 | func->priv = priv; |
| 1562 | list_add(&func->list_node, &bam_other_notify_funcs); |
| 1563 | |
| 1564 | return 0; |
| 1565 | } |
| 1566 | |
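/*
 * Usage sketch (illustrative only, not compiled): a driver that does not
 * own a mux channel can still track uplink power state by registering a
 * callback here. struct my_dev, my_power_cb() and the d pointer are
 * assumptions for illustration.
 *
 *	static void my_power_cb(void *priv, int event, unsigned long data)
 *	{
 *		struct my_dev *d = priv;
 *
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			d->ul_up = true;
 *		else if (event == BAM_DMUX_UL_DISCONNECTED)
 *			d->ul_up = false;
 *	}
 *
 *	rc = msm_bam_dmux_reg_notify(d, my_power_cb);
 */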
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1567 | static void ul_timeout(struct work_struct *work) |
| 1568 | { |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1569 | unsigned long flags; |
| 1570 | int ret; |
| 1571 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1572 | if (in_global_reset) |
| 1573 | return; |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1574 | ret = write_trylock_irqsave(&ul_wakeup_lock, flags); |
| 1575 | if (!ret) { /* failed to grab lock, reschedule and bail */ |
| 1576 | schedule_delayed_work(&ul_timeout_work, |
| 1577 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1578 | return; |
| 1579 | } |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1580 | if (bam_is_connected) { |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1581 | if (!ul_packet_written) { |
| 1582 | spin_lock(&bam_tx_pool_spinlock); |
| 1583 | if (!list_empty(&bam_tx_pool)) { |
| 1584 | struct tx_pkt_info *info; |
| 1585 | |
| 1586 | info = list_first_entry(&bam_tx_pool, |
| 1587 | struct tx_pkt_info, list_node); |
| 1588 | DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n", |
| 1589 | __func__, info->ts_sec, info->ts_nsec); |
| 1590 | DBG_INC_TX_STALL_CNT(); |
| 1591 | ul_packet_written = 1; |
| 1592 | } |
| 1593 | spin_unlock(&bam_tx_pool_spinlock); |
| 1594 | } |
| 1595 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1596 | if (ul_packet_written || atomic_read(&ul_ondemand_vote)) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1597 | BAM_DMUX_LOG("%s: pkt written %d\n", |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1598 | __func__, ul_packet_written); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1599 | ul_packet_written = 0; |
| 1600 | schedule_delayed_work(&ul_timeout_work, |
| 1601 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1602 | } else { |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1603 | ul_powerdown(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1604 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1605 | } |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1606 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1607 | ul_powerdown_finish(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1608 | } |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1609 | |
| 1610 | static int ssrestart_check(void) |
| 1611 | { |
Jeff Hugo | b8156d7 | 2013-06-04 12:51:10 -0600 | [diff] [blame] | 1612 | int ret = 0; |
| 1613 | |
Eric Holmberg | 7614a7f | 2013-07-29 15:47:12 -0600 | [diff] [blame] | 1614 | if (in_global_reset) { |
| 1615 | DMUX_LOG_KERR("%s: modem timeout: already in SSR\n", |
| 1616 | __func__); |
| 1617 | return 1; |
| 1618 | } |
| 1619 | |
Jeff Hugo | b8156d7 | 2013-06-04 12:51:10 -0600 | [diff] [blame] | 1620 | DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled for SSR\n", |
| 1621 | __func__); |
Eric Holmberg | 90285e2 | 2012-02-22 12:33:05 -0700 | [diff] [blame] | 1622 | in_global_reset = 1; |
Jeff Hugo | b8156d7 | 2013-06-04 12:51:10 -0600 | [diff] [blame] | 1623 | ret = subsystem_restart("modem"); |
| 1624 | if (ret == -ENODEV) |
| 1625 | panic("modem subsystem restart failed\n"); |
Eric Holmberg | 90285e2 | 2012-02-22 12:33:05 -0700 | [diff] [blame] | 1626 | return 1; |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1627 | } |
| 1628 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1629 | static void ul_wakeup(void) |
| 1630 | { |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1631 | int ret; |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1632 | int do_vote_dfab = 0; |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1633 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1634 | mutex_lock(&wakeup_lock); |
| 1635 | if (bam_is_connected) { /* bam got connected before lock grabbed */ |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1636 | BAM_DMUX_LOG("%s Already awake\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1637 | mutex_unlock(&wakeup_lock); |
| 1638 | return; |
| 1639 | } |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1640 | |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 1641 | /* |
Jeff Hugo | f500173 | 2012-08-27 13:19:09 -0600 | [diff] [blame] | 1642 | * if this gets hit, that means restart_notifier_cb() has started |
| 1643 | * but probably not finished, thus we know SSR has happened, but |
| 1644 | * haven't been able to send that info to our clients yet. |
| 1645 | * in that case, abort the ul_wakeup() so that we don't undo any |
| 1646 | * work restart_notifier_cb() has done. The clients will be notified |
| 1647 | * shortly. No cleanup necessary (reschedule the wakeup) as our and |
| 1648 | * their SSR handling will cover it |
| 1649 | */ |
| 1650 | if (unlikely(in_global_reset == 1)) { |
| 1651 | mutex_unlock(&wakeup_lock); |
| 1652 | return; |
| 1653 | } |
| 1654 | |
| 1655 | /* |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 1656 | * if someone is voting for UL before bam is inited (modem up first |
| 1657 | * time), set flag for init to kickoff ul wakeup once bam is inited |
| 1658 | */ |
| 1659 | mutex_lock(&delayed_ul_vote_lock); |
| 1660 | if (unlikely(!bam_mux_initialized)) { |
| 1661 | need_delayed_ul_vote = 1; |
| 1662 | mutex_unlock(&delayed_ul_vote_lock); |
| 1663 | mutex_unlock(&wakeup_lock); |
| 1664 | return; |
| 1665 | } |
| 1666 | mutex_unlock(&delayed_ul_vote_lock); |
| 1667 | |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1668 | if (a2_pc_disabled) { |
| 1669 | /* |
| 1670 | * don't grab the wakelock the first time because it is |
| 1671 | * already grabbed when a2 powers on |
| 1672 | */ |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1673 | if (likely(a2_pc_disabled_wakelock_skipped)) { |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1674 | grab_wakelock(); |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1675 | do_vote_dfab = 1; /* vote must occur after wait */ |
| 1676 | } else { |
Jeff Hugo | 583a6da | 2012-02-03 11:37:30 -0700 | [diff] [blame] | 1677 | a2_pc_disabled_wakelock_skipped = 1; |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1678 | } |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1679 | if (wait_for_dfab) { |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1680 | ret = wait_for_completion_timeout( |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1681 | &dfab_unvote_completion, HZ); |
| 1682 | BUG_ON(ret == 0); |
| 1683 | } |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1684 | if (likely(do_vote_dfab)) |
| 1685 | vote_dfab(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1686 | schedule_delayed_work(&ul_timeout_work, |
| 1687 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1688 | bam_is_connected = 1; |
| 1689 | mutex_unlock(&wakeup_lock); |
| 1690 | return; |
| 1691 | } |
| 1692 | |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1693 | /* |
 | 1694 |  * must wait for the previous power down request to have been acked; 
| 1695 | * chances are it already came in and this will just fall through |
| 1696 | * instead of waiting |
| 1697 | */ |
| 1698 | if (wait_for_ack) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1699 | BAM_DMUX_LOG("%s waiting for previous ack\n", __func__); |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1700 | ret = wait_for_completion_timeout( |
Jeff Hugo | 1f31739 | 2013-07-24 16:28:52 -0600 | [diff] [blame] | 1701 | &ul_wakeup_ack_completion, |
| 1702 | msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS)); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1703 | wait_for_ack = 0; |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1704 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1705 | mutex_unlock(&wakeup_lock); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1706 | BAM_DMUX_LOG("%s timeout previous ack\n", __func__); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1707 | return; |
| 1708 | } |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1709 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1710 | INIT_COMPLETION(ul_wakeup_ack_completion); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1711 | power_vote(1); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1712 | BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__); |
Jeff Hugo | 1f31739 | 2013-07-24 16:28:52 -0600 | [diff] [blame] | 1713 | ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, |
| 1714 | msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS)); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1715 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1716 | mutex_unlock(&wakeup_lock); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1717 | BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1718 | return; |
| 1719 | } |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1720 | BAM_DMUX_LOG("%s waiting completion\n", __func__); |
Jeff Hugo | 1f31739 | 2013-07-24 16:28:52 -0600 | [diff] [blame] | 1721 | ret = wait_for_completion_timeout(&bam_connection_completion, |
| 1722 | msecs_to_jiffies(UL_WAKEUP_TIMEOUT_MS)); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1723 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1724 | mutex_unlock(&wakeup_lock); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1725 | BAM_DMUX_LOG("%s timeout power on\n", __func__); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1726 | return; |
| 1727 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1728 | |
| 1729 | bam_is_connected = 1; |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1730 | BAM_DMUX_LOG("%s complete\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1731 | schedule_delayed_work(&ul_timeout_work, |
| 1732 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1733 | mutex_unlock(&wakeup_lock); |
| 1734 | } |
| 1735 | |
| 1736 | static void reconnect_to_bam(void) |
| 1737 | { |
| 1738 | int i; |
| 1739 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1740 | in_global_reset = 0; |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1741 | in_ssr = 0; |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1742 | vote_dfab(); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1743 | if (!power_management_only_mode) { |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1744 | if (ssr_skipped_disconnect) { |
| 1745 | /* delayed to here to prevent bus stall */ |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1746 | bam_ops->sps_disconnect_ptr(bam_tx_pipe); |
| 1747 | bam_ops->sps_disconnect_ptr(bam_rx_pipe); |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1748 | __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size); |
| 1749 | __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size); |
| 1750 | } |
| 1751 | ssr_skipped_disconnect = 0; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1752 | i = bam_ops->sps_device_reset_ptr(a2_device_handle); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1753 | if (i) |
| 1754 | pr_err("%s: device reset failed rc = %d\n", __func__, |
| 1755 | i); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1756 | i = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1757 | if (i) |
| 1758 | pr_err("%s: tx connection failed rc = %d\n", __func__, |
| 1759 | i); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1760 | i = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1761 | if (i) |
| 1762 | pr_err("%s: rx connection failed rc = %d\n", __func__, |
| 1763 | i); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1764 | i = bam_ops->sps_register_event_ptr(bam_tx_pipe, |
| 1765 | &tx_register_event); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1766 | if (i) |
| 1767 | pr_err("%s: tx event reg failed rc = %d\n", __func__, |
| 1768 | i); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1769 | i = bam_ops->sps_register_event_ptr(bam_rx_pipe, |
| 1770 | &rx_register_event); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1771 | if (i) |
| 1772 | pr_err("%s: rx event reg failed rc = %d\n", __func__, |
| 1773 | i); |
| 1774 | } |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1775 | |
| 1776 | bam_connection_is_active = 1; |
| 1777 | |
| 1778 | if (polling_mode) |
| 1779 | rx_switch_to_interrupt_mode(); |
| 1780 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1781 | toggle_apps_ack(); |
| 1782 | complete_all(&bam_connection_completion); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1783 | if (!power_management_only_mode) |
| 1784 | queue_rx(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1785 | } |
| 1786 | |
| 1787 | static void disconnect_to_bam(void) |
| 1788 | { |
| 1789 | struct list_head *node; |
| 1790 | struct rx_pkt_info *info; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1791 | unsigned long flags; |
Brent Hronik | 096f7d3 | 2013-06-28 15:43:08 -0600 | [diff] [blame] | 1792 | unsigned long time_remaining; |
| 1793 | |
Eric Holmberg | 7614a7f | 2013-07-29 15:47:12 -0600 | [diff] [blame] | 1794 | if (!in_global_reset) { |
| 1795 | time_remaining = wait_for_completion_timeout( |
| 1796 | &shutdown_completion, |
| 1797 | msecs_to_jiffies(SHUTDOWN_TIMEOUT_MS)); |
| 1798 | if (time_remaining == 0) { |
| 1799 | DMUX_LOG_KERR("%s: shutdown completion timed out\n", |
| 1800 | __func__); |
Jeff Hugo | 7c18560 | 2013-09-11 17:39:54 -0600 | [diff] [blame] | 1801 | log_rx_timestamp(); |
Eric Holmberg | 7614a7f | 2013-07-29 15:47:12 -0600 | [diff] [blame] | 1802 | ssrestart_check(); |
| 1803 | } |
Brent Hronik | 096f7d3 | 2013-06-28 15:43:08 -0600 | [diff] [blame] | 1804 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1805 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1806 | bam_connection_is_active = 0; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1807 | |
| 1808 | /* handle disconnect during active UL */ |
| 1809 | write_lock_irqsave(&ul_wakeup_lock, flags); |
| 1810 | if (bam_is_connected) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1811 | BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1812 | ul_powerdown(); |
| 1813 | } |
| 1814 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
| 1815 | ul_powerdown_finish(); |
| 1816 | |
| 1817 | /* tear down BAM connection */ |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1818 | INIT_COMPLETION(bam_connection_completion); |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1819 | |
| 1820 | /* in_ssr documentation/assumptions found in restart_notifier_cb */ |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1821 | if (!power_management_only_mode) { |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1822 | if (likely(!in_ssr)) { |
Jeff Hugo | f7ae7a6 | 2013-04-19 11:18:32 -0600 | [diff] [blame] | 1823 | BAM_DMUX_LOG("%s: disconnect tx\n", __func__); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1824 | bam_ops->sps_disconnect_ptr(bam_tx_pipe); |
Jeff Hugo | f7ae7a6 | 2013-04-19 11:18:32 -0600 | [diff] [blame] | 1825 | BAM_DMUX_LOG("%s: disconnect rx\n", __func__); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1826 | bam_ops->sps_disconnect_ptr(bam_rx_pipe); |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1827 | __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size); |
| 1828 | __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size); |
Jeff Hugo | f7ae7a6 | 2013-04-19 11:18:32 -0600 | [diff] [blame] | 1829 | BAM_DMUX_LOG("%s: device reset\n", __func__); |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1830 | sps_device_reset(a2_device_handle); |
| 1831 | } else { |
| 1832 | ssr_skipped_disconnect = 1; |
| 1833 | } |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1834 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1835 | unvote_dfab(); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1836 | |
| 1837 | mutex_lock(&bam_rx_pool_mutexlock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1838 | while (!list_empty(&bam_rx_pool)) { |
| 1839 | node = bam_rx_pool.next; |
| 1840 | list_del(node); |
| 1841 | info = container_of(node, struct rx_pkt_info, list_node); |
| 1842 | dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 1843 | bam_ops->dma_from); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1844 | dev_kfree_skb_any(info->skb); |
| 1845 | kfree(info); |
| 1846 | } |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1847 | bam_rx_pool_len = 0; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1848 | mutex_unlock(&bam_rx_pool_mutexlock); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1849 | |
Jeff Hugo | 0b13a35 | 2012-03-17 23:18:30 -0600 | [diff] [blame] | 1850 | if (disconnect_ack) |
| 1851 | toggle_apps_ack(); |
| 1852 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1853 | verify_tx_queue_is_empty(__func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1854 | } |
| 1855 | |
| 1856 | static void vote_dfab(void) |
| 1857 | { |
Jeff Hugo | ca0caa8 | 2011-12-05 16:05:23 -0700 | [diff] [blame] | 1858 | int rc; |
| 1859 | |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1860 | BAM_DMUX_LOG("%s\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1861 | mutex_lock(&dfab_status_lock); |
| 1862 | if (dfab_is_on) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1863 | BAM_DMUX_LOG("%s: dfab is already on\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1864 | mutex_unlock(&dfab_status_lock); |
| 1865 | return; |
| 1866 | } |
Jeff Hugo | d0befde | 2012-08-09 15:32:49 -0600 | [diff] [blame] | 1867 | if (dfab_clk) { |
| 1868 | rc = clk_prepare_enable(dfab_clk); |
| 1869 | if (rc) |
| 1870 | DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", |
| 1871 | rc); |
| 1872 | } |
| 1873 | if (xo_clk) { |
| 1874 | rc = clk_prepare_enable(xo_clk); |
| 1875 | if (rc) |
| 1876 | DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", |
| 1877 | rc); |
| 1878 | } |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1879 | dfab_is_on = 1; |
| 1880 | mutex_unlock(&dfab_status_lock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1881 | } |
| 1882 | |
| 1883 | static void unvote_dfab(void) |
| 1884 | { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1885 | BAM_DMUX_LOG("%s\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1886 | mutex_lock(&dfab_status_lock); |
| 1887 | if (!dfab_is_on) { |
| 1888 | DMUX_LOG_KERR("%s: dfab is already off\n", __func__); |
| 1889 | dump_stack(); |
| 1890 | mutex_unlock(&dfab_status_lock); |
| 1891 | return; |
| 1892 | } |
Jeff Hugo | d0befde | 2012-08-09 15:32:49 -0600 | [diff] [blame] | 1893 | if (dfab_clk) |
| 1894 | clk_disable_unprepare(dfab_clk); |
| 1895 | if (xo_clk) |
| 1896 | clk_disable_unprepare(xo_clk); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1897 | dfab_is_on = 0; |
| 1898 | mutex_unlock(&dfab_status_lock); |
| 1899 | } |
| 1900 | |
| 1901 | /* reference counting wrapper around wakelock */ |
| 1902 | static void grab_wakelock(void) |
| 1903 | { |
| 1904 | unsigned long flags; |
| 1905 | |
| 1906 | spin_lock_irqsave(&wakelock_reference_lock, flags); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1907 | BAM_DMUX_LOG("%s: ref count = %d\n", __func__, |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1908 | wakelock_reference_count); |
| 1909 | if (wakelock_reference_count == 0) |
| 1910 | wake_lock(&bam_wakelock); |
| 1911 | ++wakelock_reference_count; |
| 1912 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
| 1913 | } |
| 1914 | |
| 1915 | static void release_wakelock(void) |
| 1916 | { |
| 1917 | unsigned long flags; |
| 1918 | |
| 1919 | spin_lock_irqsave(&wakelock_reference_lock, flags); |
| 1920 | if (wakelock_reference_count == 0) { |
| 1921 | DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__); |
| 1922 | dump_stack(); |
| 1923 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
| 1924 | return; |
| 1925 | } |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 1926 | BAM_DMUX_LOG("%s: ref count = %d\n", __func__, |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1927 | wakelock_reference_count); |
| 1928 | --wakelock_reference_count; |
| 1929 | if (wakelock_reference_count == 0) |
| 1930 | wake_unlock(&bam_wakelock); |
| 1931 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1932 | } |
| 1933 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1934 | static int restart_notifier_cb(struct notifier_block *this, |
| 1935 | unsigned long code, |
| 1936 | void *data) |
| 1937 | { |
| 1938 | int i; |
| 1939 | struct list_head *node; |
| 1940 | struct tx_pkt_info *info; |
| 1941 | int temp_remote_status; |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 1942 | unsigned long flags; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1943 | |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1944 | /* |
| 1945 | * Bam_dmux counts on the fact that the BEFORE_SHUTDOWN level of |
 | 1946 |  * notifications are guaranteed to execute before the AFTER_SHUTDOWN 
| 1947 | * level of notifications, and that BEFORE_SHUTDOWN always occurs in |
| 1948 | * all SSR events, no matter what triggered the SSR. Also, bam_dmux |
| 1949 | * assumes that SMD does its SSR processing in the AFTER_SHUTDOWN level |
 | 1950 |  * thus bam_dmux is guaranteed to detect SSR before SMD, since the 
| 1951 | * callbacks for all the drivers within the AFTER_SHUTDOWN level could |
| 1952 | * occur in any order. Bam_dmux uses this knowledge to skip accessing |
| 1953 | * the bam hardware when disconnect_to_bam() is triggered by SMD's SSR |
 | 1954 |  * processing. We do not want to access the bam hardware during SSR 
| 1955 | * because a watchdog crash from a bus stall would likely occur. |
| 1956 | */ |
Jeff Hugo | 199294b | 2013-02-25 13:46:56 -0700 | [diff] [blame] | 1957 | if (code == SUBSYS_BEFORE_SHUTDOWN) { |
| 1958 | in_global_reset = 1; |
Jeff Hugo | a82a95c | 2012-12-14 17:56:19 -0700 | [diff] [blame] | 1959 | in_ssr = 1; |
Zaheerulla Meer | f800bba | 2013-02-13 15:49:14 +0530 | [diff] [blame] | 1960 | BAM_DMUX_LOG("%s: begin\n", __func__); |
Jeff Hugo | 199294b | 2013-02-25 13:46:56 -0700 | [diff] [blame] | 1961 | flush_workqueue(bam_mux_rx_workqueue); |
| 1962 | } |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1963 | if (code != SUBSYS_AFTER_SHUTDOWN) |
| 1964 | return NOTIFY_DONE; |
| 1965 | |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1966 | /* Handle uplink Powerdown */ |
| 1967 | write_lock_irqsave(&ul_wakeup_lock, flags); |
| 1968 | if (bam_is_connected) { |
| 1969 | ul_powerdown(); |
| 1970 | wait_for_ack = 0; |
| 1971 | } |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1972 | /* |
| 1973 | * If the modem crashes during ul_wakeup(), power_vote is 1 and needs to |
| 1974 | * be reset to 0. This is harmless if the bam_is_connected check above passes. |
| 1975 | */ |
| 1976 | power_vote(0); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1977 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
| 1978 | ul_powerdown_finish(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1979 | a2_pc_disabled = 0; |
Jeff Hugo | 583a6da | 2012-02-03 11:37:30 -0700 | [diff] [blame] | 1980 | a2_pc_disabled_wakelock_skipped = 0; |
Jeff Hugo | f62029d | 2012-07-17 13:39:53 -0600 | [diff] [blame] | 1981 | disconnect_ack = 1; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1982 | |
| 1983 | /* Cleanup Channel States */ |
Eric Holmberg | a623da8 | 2012-07-12 09:37:09 -0600 | [diff] [blame] | 1984 | mutex_lock(&bam_pdev_mutexlock); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1985 | for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) { |
| 1986 | temp_remote_status = bam_ch_is_remote_open(i); |
| 1987 | bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN; |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1988 | bam_ch[i].num_tx_pkts = 0; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1989 | if (bam_ch_is_local_open(i)) |
| 1990 | bam_ch[i].status |= BAM_CH_IN_RESET; |
| 1991 | if (temp_remote_status) { |
| 1992 | platform_device_unregister(bam_ch[i].pdev); |
| 1993 | bam_ch[i].pdev = platform_device_alloc( |
| 1994 | bam_ch[i].name, 2); |
| 1995 | } |
| 1996 | } |
Eric Holmberg | a623da8 | 2012-07-12 09:37:09 -0600 | [diff] [blame] | 1997 | mutex_unlock(&bam_pdev_mutexlock); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1998 | |
| 1999 | /* Cleanup pending UL data */ |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 2000 | spin_lock_irqsave(&bam_tx_pool_spinlock, flags); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2001 | while (!list_empty(&bam_tx_pool)) { |
| 2002 | node = bam_tx_pool.next; |
| 2003 | list_del(node); |
| 2004 | info = container_of(node, struct tx_pkt_info, |
| 2005 | list_node); |
| 2006 | if (!info->is_cmd) { |
| 2007 | dma_unmap_single(NULL, info->dma_address, |
| 2008 | info->skb->len, |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2009 | bam_ops->dma_to); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2010 | dev_kfree_skb_any(info->skb); |
| 2011 | } else { |
| 2012 | dma_unmap_single(NULL, info->dma_address, |
| 2013 | info->len, |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2014 | bam_ops->dma_to); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2015 | kfree(info->skb); |
| 2016 | } |
| 2017 | kfree(info); |
| 2018 | } |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 2019 | spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 2020 | |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2021 | BAM_DMUX_LOG("%s: complete\n", __func__); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2022 | return NOTIFY_DONE; |
| 2023 | } |
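/*
 * Illustrative sketch only (the actual definition lives elsewhere in this
 * driver): restart_notifier_cb() is delivered modem SSR events through a
 * struct notifier_block registered with the subsystem restart framework,
 * roughly:
 *
 *	static struct notifier_block restart_notifier = {
 *		.notifier_call = restart_notifier_cb,
 *	};
 *
 * which bam_dmux_init() hands to
 * subsys_notif_register_notifier("modem", &restart_notifier).
 */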
| 2024 | |
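/*
 * bam_init() - bring up the full A2 BAM data path.
 *
 * Maps the A2 BAM registers, registers the BAM with the SPS driver, and
 * connects the TX pipe (apps pipe 0 -> A2 pipe 4) and RX pipe (A2 pipe 5 ->
 * apps pipe 1), each backed by a 2k descriptor FIFO, before marking the
 * connection active and queueing RX buffers.
 */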
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2025 | static int bam_init(void) |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2026 | { |
| 2027 | u32 h; |
| 2028 | dma_addr_t dma_addr; |
| 2029 | int ret; |
| 2030 | void *a2_virt_addr; |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2031 | int skip_iounmap = 0; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2032 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2033 | vote_dfab(); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2034 | /* init BAM */ |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2035 | a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base), |
| 2036 | a2_phys_size); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2037 | if (!a2_virt_addr) { |
| 2038 | pr_err("%s: ioremap failed\n", __func__); |
| 2039 | ret = -ENOMEM; |
Jeff Hugo | 994a92d | 2012-01-05 13:25:21 -0700 | [diff] [blame] | 2040 | goto ioremap_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2041 | } |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2042 | a2_props.phys_addr = (u32)(a2_phys_base); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2043 | a2_props.virt_addr = a2_virt_addr; |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2044 | a2_props.virt_size = a2_phys_size; |
| 2045 | a2_props.irq = a2_bam_irq; |
Jeff Hugo | 927cba6 | 2011-11-11 11:49:52 -0700 | [diff] [blame] | 2046 | a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2047 | a2_props.num_pipes = A2_NUM_PIPES; |
| 2048 | a2_props.summing_threshold = A2_SUMMING_THRESHOLD; |
Jeff Hugo | 75913c8 | 2011-12-05 15:59:01 -0700 | [diff] [blame] | 2049 | if (cpu_is_msm9615()) |
| 2050 | a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2051 | /* need to free on tear down */ |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2052 | ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2053 | if (ret < 0) { |
| 2054 | pr_err("%s: register bam error %d\n", __func__, ret); |
| 2055 | goto register_bam_failed; |
| 2056 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2057 | a2_device_handle = h; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2058 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2059 | bam_tx_pipe = bam_ops->sps_alloc_endpoint_ptr(); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2060 | if (bam_tx_pipe == NULL) { |
| 2061 | pr_err("%s: tx alloc endpoint failed\n", __func__); |
| 2062 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2063 | goto tx_alloc_endpoint_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2064 | } |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2065 | ret = bam_ops->sps_get_config_ptr(bam_tx_pipe, &tx_connection); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2066 | if (ret) { |
| 2067 | pr_err("%s: tx get config failed %d\n", __func__, ret); |
| 2068 | goto tx_get_config_failed; |
| 2069 | } |
| 2070 | |
| 2071 | tx_connection.source = SPS_DEV_HANDLE_MEM; |
| 2072 | tx_connection.src_pipe_index = 0; |
| 2073 | tx_connection.destination = h; |
| 2074 | tx_connection.dest_pipe_index = 4; |
| 2075 | tx_connection.mode = SPS_MODE_DEST; |
| 2076 | tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT; |
| 2077 | tx_desc_mem_buf.size = 0x800; /* 2k */ |
| 2078 | tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size, |
| 2079 | &dma_addr, 0); |
| 2080 | if (tx_desc_mem_buf.base == NULL) { |
| 2081 | pr_err("%s: tx memory alloc failed\n", __func__); |
| 2082 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2083 | goto tx_get_config_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2084 | } |
| 2085 | tx_desc_mem_buf.phys_base = dma_addr; |
| 2086 | memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size); |
| 2087 | tx_connection.desc = tx_desc_mem_buf; |
| 2088 | tx_connection.event_thresh = 0x10; |
| 2089 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2090 | ret = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2091 | if (ret < 0) { |
| 2092 | pr_err("%s: tx connect error %d\n", __func__, ret); |
| 2093 | goto tx_connect_failed; |
| 2094 | } |
| 2095 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2096 | bam_rx_pipe = bam_ops->sps_alloc_endpoint_ptr(); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2097 | if (bam_rx_pipe == NULL) { |
| 2098 | pr_err("%s: rx alloc endpoint failed\n", __func__); |
| 2099 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2100 | goto rx_alloc_endpoint_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2101 | } |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2102 | ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &rx_connection); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2103 | if (ret) { |
| 2104 | pr_err("%s: rx get config failed %d\n", __func__, ret); |
| 2105 | goto rx_get_config_failed; |
| 2106 | } |
| 2107 | |
| 2108 | rx_connection.source = h; |
| 2109 | rx_connection.src_pipe_index = 5; |
| 2110 | rx_connection.destination = SPS_DEV_HANDLE_MEM; |
| 2111 | rx_connection.dest_pipe_index = 1; |
| 2112 | rx_connection.mode = SPS_MODE_SRC; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 2113 | rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | |
| 2114 | SPS_O_ACK_TRANSFERS; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2115 | rx_desc_mem_buf.size = 0x800; /* 2k */ |
| 2116 | rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size, |
| 2117 | &dma_addr, 0); |
| 2118 | if (rx_desc_mem_buf.base == NULL) { |
| 2119 | pr_err("%s: rx memory alloc failed\n", __func__); |
| 2120 | ret = -ENOMEM; |
| 2121 | goto rx_mem_failed; |
| 2122 | } |
| 2123 | rx_desc_mem_buf.phys_base = dma_addr; |
| 2124 | memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size); |
| 2125 | rx_connection.desc = rx_desc_mem_buf; |
| 2126 | rx_connection.event_thresh = 0x10; |
| 2127 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2128 | ret = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2129 | if (ret < 0) { |
| 2130 | pr_err("%s: rx connect error %d\n", __func__, ret); |
| 2131 | goto rx_connect_failed; |
| 2132 | } |
| 2133 | |
| 2134 | tx_register_event.options = SPS_O_EOT; |
| 2135 | tx_register_event.mode = SPS_TRIGGER_CALLBACK; |
| 2136 | tx_register_event.xfer_done = NULL; |
| 2137 | tx_register_event.callback = bam_mux_tx_notify; |
| 2138 | tx_register_event.user = NULL; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2139 | ret = bam_ops->sps_register_event_ptr(bam_tx_pipe, &tx_register_event); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2140 | if (ret < 0) { |
| 2141 | pr_err("%s: tx register event error %d\n", __func__, ret); |
| 2142 | goto rx_event_reg_failed; |
| 2143 | } |
| 2144 | |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 2145 | rx_register_event.options = SPS_O_EOT; |
| 2146 | rx_register_event.mode = SPS_TRIGGER_CALLBACK; |
| 2147 | rx_register_event.xfer_done = NULL; |
| 2148 | rx_register_event.callback = bam_mux_rx_notify; |
| 2149 | rx_register_event.user = NULL; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2150 | ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event); |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 2151 | if (ret < 0) { |
| 2152 | pr_err("%s: rx register event error %d\n", __func__, ret); |
| 2153 | goto rx_event_reg_failed; |
| 2154 | } |
| 2155 | |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2156 | mutex_lock(&delayed_ul_vote_lock); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2157 | bam_mux_initialized = 1; |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2158 | if (need_delayed_ul_vote) { |
| 2159 | need_delayed_ul_vote = 0; |
| 2160 | msm_bam_dmux_kickoff_ul_wakeup(); |
| 2161 | } |
| 2162 | mutex_unlock(&delayed_ul_vote_lock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2163 | toggle_apps_ack(); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2164 | bam_connection_is_active = 1; |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2165 | complete_all(&bam_connection_completion); |
Jeff Hugo | 2fb555e | 2012-03-14 16:33:47 -0600 | [diff] [blame] | 2166 | queue_rx(); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2167 | return 0; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2168 | |
| 2169 | rx_event_reg_failed: |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2170 | bam_ops->sps_disconnect_ptr(bam_rx_pipe); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2171 | rx_connect_failed: |
| 2172 | dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base, |
| 2173 | rx_desc_mem_buf.phys_base); |
| 2174 | rx_mem_failed: |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2175 | rx_get_config_failed: |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2176 | bam_ops->sps_free_endpoint_ptr(bam_rx_pipe); |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2177 | rx_alloc_endpoint_failed: |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2178 | bam_ops->sps_disconnect_ptr(bam_tx_pipe); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2179 | tx_connect_failed: |
| 2180 | dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base, |
| 2181 | tx_desc_mem_buf.phys_base); |
| 2182 | tx_get_config_failed: |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2183 | bam_ops->sps_free_endpoint_ptr(bam_tx_pipe); |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2184 | tx_alloc_endpoint_failed: |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2185 | bam_ops->sps_deregister_bam_device_ptr(h); |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2186 | /* |
| 2187 | * sps_deregister_bam_device() calls iounmap. Calling iounmap on the |
| 2188 | * same mapping below would cause a crash, so skip it if we've already |
| 2189 | * freed it here. |
| 2190 | */ |
| 2191 | skip_iounmap = 1; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2192 | register_bam_failed: |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2193 | if (!skip_iounmap) |
| 2194 | iounmap(a2_virt_addr); |
Jeff Hugo | 994a92d | 2012-01-05 13:25:21 -0700 | [diff] [blame] | 2195 | ioremap_failed: |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2196 | /*destroy_workqueue(bam_mux_workqueue);*/ |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2197 | return ret; |
| 2198 | } |
| 2199 | |
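/*
 * bam_init_fallback() - minimal bring-up used when the full bam_init() fails.
 *
 * Only registers the BAM device so the A2 power-control handshake keeps
 * working; no data pipes are set up and power_management_only_mode is set so
 * the data path stays disabled.
 */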
| 2200 | static int bam_init_fallback(void) |
| 2201 | { |
| 2202 | u32 h; |
| 2203 | int ret; |
| 2204 | void *a2_virt_addr; |
| 2205 | |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2206 | /* init BAM */ |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2207 | a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base), |
| 2208 | a2_phys_size); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2209 | if (!a2_virt_addr) { |
| 2210 | pr_err("%s: ioremap failed\n", __func__); |
| 2211 | ret = -ENOMEM; |
| 2212 | goto ioremap_failed; |
| 2213 | } |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2214 | a2_props.phys_addr = (u32)(a2_phys_base); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2215 | a2_props.virt_addr = a2_virt_addr; |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2216 | a2_props.virt_size = a2_phys_size; |
| 2217 | a2_props.irq = a2_bam_irq; |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2218 | a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP; |
| 2219 | a2_props.num_pipes = A2_NUM_PIPES; |
| 2220 | a2_props.summing_threshold = A2_SUMMING_THRESHOLD; |
| 2221 | if (cpu_is_msm9615()) |
| 2222 | a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2223 | ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2224 | if (ret < 0) { |
| 2225 | pr_err("%s: register bam error %d\n", __func__, ret); |
| 2226 | goto register_bam_failed; |
| 2227 | } |
| 2228 | a2_device_handle = h; |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2229 | |
| 2230 | mutex_lock(&delayed_ul_vote_lock); |
| 2231 | bam_mux_initialized = 1; |
| 2232 | if (need_delayed_ul_vote) { |
| 2233 | need_delayed_ul_vote = 0; |
| 2234 | msm_bam_dmux_kickoff_ul_wakeup(); |
| 2235 | } |
| 2236 | mutex_unlock(&delayed_ul_vote_lock); |
Jeff Hugo | 2bec977 | 2012-04-05 12:25:16 -0600 | [diff] [blame] | 2237 | toggle_apps_ack(); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2238 | |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 2239 | power_management_only_mode = 1; |
| 2240 | bam_connection_is_active = 1; |
| 2241 | complete_all(&bam_connection_completion); |
| 2242 | |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2243 | return 0; |
| 2244 | |
| 2245 | register_bam_failed: |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2246 | iounmap(a2_virt_addr); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2247 | ioremap_failed: |
| 2248 | return ret; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2249 | } |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2250 | |
Jeff Hugo | a670b76 | 2012-03-15 15:58:28 -0600 | [diff] [blame] | 2251 | static void msm9615_bam_init(void) |
Eric Holmberg | 604ab25 | 2012-01-15 00:01:18 -0700 | [diff] [blame] | 2252 | { |
| 2253 | int ret = 0; |
| 2254 | |
| 2255 | ret = bam_init(); |
| 2256 | if (ret) { |
| 2257 | ret = bam_init_fallback(); |
| 2258 | if (ret) |
| 2259 | pr_err("%s: bam init fallback failed: %d\n", |
| 2260 | __func__, ret); |
| 2261 | } |
| 2262 | } |
| 2263 | |
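/*
 * toggle_apps_ack() - acknowledge an A2 power-control request.
 *
 * Alternately sets and clears SMSM_A2_POWER_CONTROL_ACK in the apps SMSM
 * state so the modem sees a new edge for every request. Skipped while an
 * SSR is in progress.
 */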
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2264 | static void toggle_apps_ack(void) |
| 2265 | { |
| 2266 | static unsigned int clear_bit; /* 0 = set the bit, else clear bit */ |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2267 | |
Eric Holmberg | 7614a7f | 2013-07-29 15:47:12 -0600 | [diff] [blame] | 2268 | if (in_global_reset) { |
| 2269 | BAM_DMUX_LOG("%s: skipped due to SSR\n", __func__); |
| 2270 | return; |
| 2271 | } |
| 2272 | |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2273 | BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__, |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2274 | clear_bit & 0x1, ~clear_bit & 0x1); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2275 | bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE, |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2276 | clear_bit & SMSM_A2_POWER_CONTROL_ACK, |
| 2277 | ~clear_bit & SMSM_A2_POWER_CONTROL_ACK); |
| 2278 | clear_bit = ~clear_bit; |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2279 | DBG_INC_ACK_OUT_CNT(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2280 | } |
| 2281 | |
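/*
 * bam_dmux_smsm_cb() - react to changes of the modem's SMSM_A2_POWER_CONTROL
 * bit: reconnect or disconnect the BAM once the mux is initialized, or run
 * the initial BAM bring-up on the first power-up request. Repeated
 * notifications for the same state are ignored under smsm_cb_lock.
 */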
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2282 | static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state) |
| 2283 | { |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2284 | static int last_processed_state; |
| 2285 | |
| 2286 | mutex_lock(&smsm_cb_lock); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2287 | bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0; |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2288 | DBG_INC_A2_POWER_CONTROL_IN_CNT(); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2289 | BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state, |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2290 | new_state); |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2291 | if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2292 | BAM_DMUX_LOG("%s: already processed this state\n", __func__); |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2293 | mutex_unlock(&smsm_cb_lock); |
| 2294 | return; |
| 2295 | } |
| 2296 | |
| 2297 | last_processed_state = new_state & SMSM_A2_POWER_CONTROL; |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2298 | |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2299 | if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2300 | BAM_DMUX_LOG("%s: reconnect\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2301 | grab_wakelock(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2302 | reconnect_to_bam(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2303 | } else if (bam_mux_initialized && |
| 2304 | !(new_state & SMSM_A2_POWER_CONTROL)) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2305 | BAM_DMUX_LOG("%s: disconnect\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2306 | disconnect_to_bam(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2307 | release_wakelock(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2308 | } else if (new_state & SMSM_A2_POWER_CONTROL) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2309 | BAM_DMUX_LOG("%s: init\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2310 | grab_wakelock(); |
Jeff Hugo | a670b76 | 2012-03-15 15:58:28 -0600 | [diff] [blame] | 2311 | if (cpu_is_msm9615()) |
| 2312 | msm9615_bam_init(); |
| 2313 | else |
Eric Holmberg | 604ab25 | 2012-01-15 00:01:18 -0700 | [diff] [blame] | 2314 | bam_init(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2315 | } else { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2316 | BAM_DMUX_LOG("%s: bad state change\n", __func__); |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2317 | pr_err("%s: unsupported state change\n", __func__); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2318 | } |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2319 | mutex_unlock(&smsm_cb_lock); |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2320 | |
| 2321 | } |
| 2322 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2323 | static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state, |
| 2324 | uint32_t new_state) |
| 2325 | { |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2326 | DBG_INC_ACK_IN_CNT(); |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2327 | BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state, |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2328 | new_state); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2329 | complete_all(&ul_wakeup_ack_completion); |
| 2330 | } |
| 2331 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2332 | /** |
| 2333 | * msm_bam_dmux_set_bam_ops() - sets the bam_ops |
| 2334 | * @ops: bam_ops_if to set |
| 2335 | * |
| 2336 | * Sets bam_ops to allow switching of runtime behavior. Precondition: bam dmux |
| 2337 | * must be in an idle state. If input ops is NULL, then bam_ops will be |
| 2338 | * restored to their default state. |
| 2339 | */ |
| 2340 | void msm_bam_dmux_set_bam_ops(struct bam_ops_if *ops) |
| 2341 | { |
| 2342 | if (ops != NULL) |
| 2343 | bam_ops = ops; |
| 2344 | else |
| 2345 | bam_ops = &bam_default_ops; |
| 2346 | } |
| 2347 | EXPORT_SYMBOL(msm_bam_dmux_set_bam_ops); |
| 2348 | |
| 2349 | /** |
| 2350 | * msm_bam_dmux_deinit() - puts bam dmux into a deinited state |
| 2351 | * |
| 2352 | * Puts bam dmux into a deinitialized state by simulating an SSR. |
| 2353 | */ |
| 2354 | void msm_bam_dmux_deinit(void) |
| 2355 | { |
| 2356 | restart_notifier_cb(NULL, SUBSYS_BEFORE_SHUTDOWN, NULL); |
| 2357 | restart_notifier_cb(NULL, SUBSYS_AFTER_SHUTDOWN, NULL); |
| 2358 | } |
| 2359 | EXPORT_SYMBOL(msm_bam_dmux_deinit); |
| 2360 | |
| 2361 | /** |
| 2362 | * msm_bam_dmux_reinit() - reinitializes bam dmux |
| 2363 | */ |
| 2364 | void msm_bam_dmux_reinit(void) |
| 2365 | { |
| 2366 | bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE, |
| 2367 | SMSM_A2_POWER_CONTROL, |
| 2368 | bam_dmux_smsm_cb, NULL); |
| 2369 | bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE, |
| 2370 | SMSM_A2_POWER_CONTROL_ACK, |
| 2371 | bam_dmux_smsm_ack_cb, NULL); |
| 2372 | bam_mux_initialized = 0; |
| 2373 | bam_init(); |
| 2374 | } |
| 2375 | EXPORT_SYMBOL(msm_bam_dmux_reinit); |
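/*
 * Illustrative use of the three hooks above from a test harness. The ops
 * structure name and the calling context are assumptions, not something
 * this driver provides:
 *
 *	extern struct bam_ops_if my_test_ops;
 *
 *	msm_bam_dmux_deinit();                   (idle the mux via a simulated SSR)
 *	msm_bam_dmux_set_bam_ops(&my_test_ops);  (redirect SPS/SMSM calls to stubs)
 *	msm_bam_dmux_reinit();                   (re-register SMSM callbacks, re-init)
 *	...
 *	msm_bam_dmux_set_bam_ops(NULL);          (restore the default ops)
 */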
| 2376 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2377 | static int bam_dmux_probe(struct platform_device *pdev) |
| 2378 | { |
| 2379 | int rc; |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2380 | struct resource *r; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2381 | |
| 2382 | DBG("%s probe called\n", __func__); |
| 2383 | if (bam_mux_initialized) |
| 2384 | return 0; |
| 2385 | |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2386 | if (pdev->dev.of_node) { |
| 2387 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2388 | if (!r) { |
| 2389 | pr_err("%s: reg field missing\n", __func__); |
| 2390 | return -ENODEV; |
| 2391 | } |
| 2392 | a2_phys_base = (void *)(r->start); |
| 2393 | a2_phys_size = (uint32_t)(resource_size(r)); |
| 2394 | a2_bam_irq = platform_get_irq(pdev, 0); |
| 2395 | if (a2_bam_irq == -ENXIO) { |
| 2396 | pr_err("%s: irq field missing\n", __func__); |
| 2397 | return -ENODEV; |
| 2398 | } |
| 2399 | DBG("%s: base:%p size:%x irq:%d\n", __func__, |
| 2400 | a2_phys_base, |
| 2401 | a2_phys_size, |
| 2402 | a2_bam_irq); |
| 2403 | } else { /* fallback to default init data */ |
| 2404 | a2_phys_base = (void *)(A2_PHYS_BASE); |
| 2405 | a2_phys_size = A2_PHYS_SIZE; |
| 2406 | a2_bam_irq = A2_BAM_IRQ; |
| 2407 | } |
| 2408 | |
Stephen Boyd | 69d35e3 | 2012-02-14 15:33:30 -0800 | [diff] [blame] | 2409 | xo_clk = clk_get(&pdev->dev, "xo"); |
| 2410 | if (IS_ERR(xo_clk)) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2411 | BAM_DMUX_LOG("%s: did not get xo clock\n", __func__); |
Jeff Hugo | d0befde | 2012-08-09 15:32:49 -0600 | [diff] [blame] | 2412 | xo_clk = NULL; |
Stephen Boyd | 69d35e3 | 2012-02-14 15:33:30 -0800 | [diff] [blame] | 2413 | } |
Stephen Boyd | 1c51a49 | 2011-10-26 12:11:47 -0700 | [diff] [blame] | 2414 | dfab_clk = clk_get(&pdev->dev, "bus_clk"); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2415 | if (IS_ERR(dfab_clk)) { |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2416 | BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__); |
Jeff Hugo | d0befde | 2012-08-09 15:32:49 -0600 | [diff] [blame] | 2417 | dfab_clk = NULL; |
| 2418 | } else { |
| 2419 | rc = clk_set_rate(dfab_clk, 64000000); |
| 2420 | if (rc) |
| 2421 | pr_err("%s: unable to set dfab clock rate\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2422 | } |
| 2423 | |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 2424 | /* |
| 2425 | * Set up the workqueue so that it can be pinned to core 0 without |
| 2426 | * blocking the watchdog pet function, and so that netif_rx() in rmnet |
| 2427 | * only uses one queue. |
| 2428 | */ |
| 2429 | bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx", |
| 2430 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2431 | if (!bam_mux_rx_workqueue) |
| 2432 | return -ENOMEM; |
| 2433 | |
| 2434 | bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx"); |
| 2435 | if (!bam_mux_tx_workqueue) { |
| 2436 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2437 | return -ENOMEM; |
| 2438 | } |
| 2439 | |
Jeff Hugo | 7960abd | 2011-08-02 15:39:38 -0600 | [diff] [blame] | 2440 | for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2441 | spin_lock_init(&bam_ch[rc].lock); |
Jeff Hugo | 7960abd | 2011-08-02 15:39:38 -0600 | [diff] [blame] | 2442 | scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN, |
| 2443 | "bam_dmux_ch_%d", rc); |
| 2444 | /* bus 2, ie a2 stream 2 */ |
| 2445 | bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2); |
| 2446 | if (!bam_ch[rc].pdev) { |
| 2447 | pr_err("%s: platform device alloc failed\n", __func__); |
| 2448 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2449 | destroy_workqueue(bam_mux_tx_workqueue); |
| 2450 | return -ENOMEM; |
| 2451 | } |
| 2452 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2453 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2454 | init_completion(&ul_wakeup_ack_completion); |
| 2455 | init_completion(&bam_connection_completion); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2456 | init_completion(&dfab_unvote_completion); |
Brent Hronik | 096f7d3 | 2013-06-28 15:43:08 -0600 | [diff] [blame] | 2457 | init_completion(&shutdown_completion); |
| 2458 | complete_all(&shutdown_completion); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2459 | INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout); |
Jeff Hugo | 988e7ba | 2012-10-03 15:53:54 -0600 | [diff] [blame] | 2460 | INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2461 | wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock"); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2462 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2463 | rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE, |
| 2464 | SMSM_A2_POWER_CONTROL, |
| 2465 | bam_dmux_smsm_cb, NULL); |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2466 | |
| 2467 | if (rc) { |
| 2468 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2469 | destroy_workqueue(bam_mux_tx_workqueue); |
| 2470 | pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc); |
| 2471 | return -ENOMEM; |
| 2472 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2473 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2474 | rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE, |
| 2475 | SMSM_A2_POWER_CONTROL_ACK, |
| 2476 | bam_dmux_smsm_ack_cb, NULL); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2477 | |
| 2478 | if (rc) { |
| 2479 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2480 | destroy_workqueue(bam_mux_tx_workqueue); |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2481 | bam_ops->smsm_state_cb_deregister_ptr(SMSM_MODEM_STATE, |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2482 | SMSM_A2_POWER_CONTROL, |
| 2483 | bam_dmux_smsm_cb, NULL); |
| 2484 | pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__, |
| 2485 | rc); |
| 2486 | for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) |
| 2487 | platform_device_put(bam_ch[rc].pdev); |
| 2488 | return -ENOMEM; |
| 2489 | } |
| 2490 | |
Brent Hronik | 89c96ba | 2013-08-27 14:34:22 -0600 | [diff] [blame^] | 2491 | if (bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE) & |
| 2492 | SMSM_A2_POWER_CONTROL) |
| 2493 | bam_dmux_smsm_cb(NULL, 0, |
| 2494 | bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE)); |
Eric Holmberg | fd1e2ae | 2011-11-15 18:28:17 -0700 | [diff] [blame] | 2495 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2496 | return 0; |
| 2497 | } |
| 2498 | |
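/*
 * Matching "qcom,bam_dmux" device tree nodes are expected to carry the A2 BAM
 * register region and interrupt, which bam_dmux_probe() above reads with
 * platform_get_resource()/platform_get_irq(). A purely illustrative node,
 * with placeholder values rather than real platform data:
 *
 *	bam_dmux@0 {
 *		compatible = "qcom,bam_dmux";
 *		reg = <A2_BAM_BASE A2_BAM_SIZE>;
 *		interrupts = <A2_BAM_IRQ>;
 *	};
 */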
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2499 | static struct of_device_id msm_match_table[] = { |
| 2500 | {.compatible = "qcom,bam_dmux"}, |
| 2501 | {}, |
| 2502 | }; |
| 2503 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2504 | static struct platform_driver bam_dmux_driver = { |
| 2505 | .probe = bam_dmux_probe, |
| 2506 | .driver = { |
| 2507 | .name = "BAM_RMNT", |
| 2508 | .owner = THIS_MODULE, |
Jeff Hugo | 3910ee1 | 2012-08-21 14:08:20 -0600 | [diff] [blame] | 2509 | .of_match_table = msm_match_table, |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2510 | }, |
| 2511 | }; |
| 2512 | |
| 2513 | static int __init bam_dmux_init(void) |
| 2514 | { |
| 2515 | #ifdef CONFIG_DEBUG_FS |
| 2516 | struct dentry *dent; |
| 2517 | |
| 2518 | dent = debugfs_create_dir("bam_dmux", 0); |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 2519 | if (!IS_ERR(dent)) { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2520 | debug_create("tbl", 0444, dent, debug_tbl); |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 2521 | debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt); |
| 2522 | debug_create("stats", 0444, dent, debug_stats); |
| 2523 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2524 | #endif |
Zaheerulla Meer | 6fbf32c | 2013-01-31 17:06:44 +0530 | [diff] [blame] | 2525 | |
| 2526 | bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux"); |
| 2527 | if (!bam_ipc_log_txt) { |
| 2528 | pr_err("%s: unable to create IPC Logging Context\n", __func__); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2529 | } |
| 2530 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame] | 2531 | rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP; |
| 2532 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2533 | subsys_notif_register_notifier("modem", &restart_notifier); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2534 | return platform_driver_register(&bam_dmux_driver); |
| 2535 | } |
| 2536 | |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2537 | late_initcall(bam_dmux_init); /* needs to init after SMD */ |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2538 | MODULE_DESCRIPTION("MSM BAM DMUX"); |
| 2539 | MODULE_LICENSE("GPL v2"); |