/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define BAM_MUX_HDR_MAGIC_NO 0x33fc

#define BAM_MUX_HDR_CMD_DATA 0
#define BAM_MUX_HDR_CMD_OPEN 1
#define BAM_MUX_HDR_CMD_CLOSE 2
#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4


#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 1050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 40;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled = 1;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do { \
	bam_dmux_tx_stall_cnt++; \
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_DEFAULT_DESCRIPTORS 32
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define BUFFER_SIZE 2048
#define NUM_BUFFERS 32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000 /* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void bam_dmux_log(const char *fmt, ...)
					__printf(1, 2);


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * The complete size of the message is limited to LOG_MESSAGE_MAX_SIZE bytes.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 * #: >=1 On-demand uplink vote
	 * D: 1 = Disconnect ACK active
	 */
	len += scnprintf(buff, sizeof(buff),
		"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
		(unsigned)t_now, nanosec_rem,
		a2_pc_disabled ? 'D' : 'd',
		in_global_reset ? 'R' : 'r',
		bam_dmux_power_state ? 'P' : 'p',
		bam_connection_is_active ? 'A' : 'a',
		bam_dmux_uplink_vote ? 'V' : 'v',
		bam_is_connected ? 'U' : 'u',
		wait_for_ack ? 'W' : 'w',
		ul_wakeup_ack_completion.done ? 'A' : 'a',
		atomic_read(&ul_ondemand_vote),
		disconnect_ack ? 'D' : 'd'
		);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

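/*
 * Refill the RX descriptor pool up to NUM_BUFFERS entries.  For each entry,
 * allocate an skb, DMA-map it, add it to bam_rx_pool, and queue it on the
 * BAM RX pipe.  If a failure leaves the pool empty, in_global_reset is set
 * since RX can no longer make progress.
 */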
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
		if (!info) {
			pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
		if (info->skb == NULL) {
			DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
					__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						DMA_FROM_DEVICE);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0) {
		DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
		in_global_reset = 1;
	}
}

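/*
 * Strip the mux header from a received data packet and hand the skb to the
 * channel's notify() callback (or free it if no callback is registered),
 * then replenish the RX pool.
 */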
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

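/*
 * Handle a remote OPEN command: mark the channel remote-open and register
 * its platform device so clients can probe.  Skipped if a subsystem restart
 * is in progress.
 */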
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

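/*
 * RX work function: validate the mux header of a received buffer and
 * dispatch on the command type (DATA, OPEN, OPEN_NO_A2_PC, CLOSE).
 * Invalid headers or channel IDs are logged and dropped.
 */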
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

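/*
 * Queue a mux command (e.g. an OPEN or CLOSE header) on the TX pipe.  The
 * buffer is DMA-mapped and tracked in bam_tx_pool, and is freed from
 * bam_mux_write_done() once the transfer completes.
 */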
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

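/*
 * TX completion work function.  Completions must arrive in order, so the
 * finished packet is expected to be the head of bam_tx_pool; a mismatch is
 * fatal (BUG).  Data packets are handed back to the channel's notify()
 * callback with BAM_DMUX_WRITE_DONE.
 */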
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

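/*
 * Client entry point for transmitting an skb on an open channel.  Enforces
 * the high watermark for channels using watermark accounting, wakes the
 * uplink if needed, pads the payload to a 4-byte boundary, prepends a
 * bam_mux_hdr, and queues the transfer on the TX pipe.
 */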
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* the caller should allocate space for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

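/*
 * Client entry point for opening a channel.  The channel must already be
 * remote-open; on success the notify() callback is registered and an OPEN
 * command is sent to the remote side.
 */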
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

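/*
 * Client entry point for closing a channel.  Clears the local-open state
 * and, unless the channel is in reset, sends a CLOSE command to the remote
 * side.
 */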
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

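/*
 * Flow-control query: returns nonzero once the channel's in-flight TX count
 * reaches HIGH_WATERMARK.  Calling this also enables watermark accounting
 * (use_wm) for the channel.
 */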
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1004 | int msm_bam_dmux_is_ch_full(uint32_t id) |
| 1005 | { |
| 1006 | unsigned long flags; |
| 1007 | int ret; |
| 1008 | |
| 1009 | if (id >= BAM_DMUX_NUM_CHANNELS) |
| 1010 | return -EINVAL; |
| 1011 | |
| 1012 | spin_lock_irqsave(&bam_ch[id].lock, flags); |
| 1013 | bam_ch[id].use_wm = 1; |
| 1014 | ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK; |
| 1015 | DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__, |
| 1016 | id, bam_ch[id].num_tx_pkts, ret); |
| 1017 | if (!bam_ch_is_local_open(id)) { |
| 1018 | ret = -ENODEV; |
| 1019 | pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status); |
| 1020 | } |
| 1021 | spin_unlock_irqrestore(&bam_ch[id].lock, flags); |
| 1022 | |
| 1023 | return ret; |
| 1024 | } |
| 1025 | |
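| | /* |
| |  * Counterpart of the check above: nonzero once the in-flight TX count |
| |  * has dropped to LOW_WATERMARK or below. |
| |  */ |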
| 1026 | int msm_bam_dmux_is_ch_low(uint32_t id) |
| 1027 | { |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1028 | unsigned long flags; |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1029 | int ret; |
| 1030 | |
| 1031 | if (id >= BAM_DMUX_NUM_CHANNELS) |
| 1032 | return -EINVAL; |
| 1033 | |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1034 | spin_lock_irqsave(&bam_ch[id].lock, flags); |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1035 | bam_ch[id].use_wm = 1; |
| 1036 | ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK; |
| 1037 | DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__, |
| 1038 | id, bam_ch[id].num_tx_pkts, ret); |
| 1039 | if (!bam_ch_is_local_open(id)) { |
| 1040 | ret = -ENODEV; |
| 1041 | pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status); |
| 1042 | } |
Eric Holmberg | ed3ca0a | 2012-04-09 15:44:58 -0600 | [diff] [blame] | 1043 | spin_unlock_irqrestore(&bam_ch[id].lock, flags); |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1044 | |
| 1045 | return ret; |
| 1046 | } |
| 1047 | |
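| | /* |
| |  * Leave RX polling mode: re-register the EOT event, restore interrupt |
| |  * options on the pipe, release the wakelock, then drain any descriptors |
| |  * that completed while interrupts were off.  Reverts to polling if the |
| |  * SPS calls fail. |
| |  */ |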
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1048 | static void rx_switch_to_interrupt_mode(void) |
| 1049 | { |
| 1050 | struct sps_connect cur_rx_conn; |
| 1051 | struct sps_iovec iov; |
| 1052 | struct rx_pkt_info *info; |
| 1053 | int ret; |
| 1054 | |
| 1055 | /* |
| 1056 | * Attempt to enable interrupts - if this fails, |
| 1057 | * continue polling and we will retry later. |
| 1058 | */ |
| 1059 | ret = sps_get_config(bam_rx_pipe, &cur_rx_conn); |
| 1060 | if (ret) { |
| 1061 | pr_err("%s: sps_get_config() failed %d\n", __func__, ret); |
| 1062 | goto fail; |
| 1063 | } |
| 1064 | |
| 1065 | rx_register_event.options = SPS_O_EOT; |
| 1066 | ret = sps_register_event(bam_rx_pipe, &rx_register_event); |
| 1067 | if (ret) { |
| 1068 | pr_err("%s: sps_register_event() failed %d\n", __func__, ret); |
| 1069 | goto fail; |
| 1070 | } |
| 1071 | |
| 1072 | cur_rx_conn.options = SPS_O_AUTO_ENABLE | |
| 1073 | SPS_O_EOT | SPS_O_ACK_TRANSFERS; |
| 1074 | ret = sps_set_config(bam_rx_pipe, &cur_rx_conn); |
| 1075 | if (ret) { |
| 1076 | pr_err("%s: sps_set_config() failed %d\n", __func__, ret); |
| 1077 | goto fail; |
| 1078 | } |
| 1079 | polling_mode = 0; |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1080 | release_wakelock(); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1081 | |
| 1082 | 	/* handle any rx packets received before the interrupt was enabled */ |
| 1083 | while (bam_connection_is_active && !polling_mode) { |
| 1084 | ret = sps_get_iovec(bam_rx_pipe, &iov); |
| 1085 | if (ret) { |
| 1086 | pr_err("%s: sps_get_iovec failed %d\n", |
| 1087 | __func__, ret); |
| 1088 | break; |
| 1089 | } |
| 1090 | if (iov.addr == 0) |
| 1091 | break; |
| 1092 | |
| 1093 | mutex_lock(&bam_rx_pool_mutexlock); |
| 1094 | if (unlikely(list_empty(&bam_rx_pool))) { |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1095 | DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n", |
| 1096 | __func__, (void *)iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1097 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1098 | continue; |
| 1099 | } |
| 1100 | info = list_first_entry(&bam_rx_pool, struct rx_pkt_info, |
| 1101 | list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1102 | if (info->dma_address != iov.addr) { |
| 1103 | DMUX_LOG_KERR("%s: iovec %p != dma %p\n", |
| 1104 | __func__, |
| 1105 | (void *)iov.addr, |
| 1106 | (void *)info->dma_address); |
| 1107 | list_for_each_entry(info, &bam_rx_pool, list_node) { |
| 1108 | DMUX_LOG_KERR("%s: dma %p\n", __func__, |
| 1109 | (void *)info->dma_address); |
| 1110 | if (iov.addr == info->dma_address) |
| 1111 | break; |
| 1112 | } |
| 1113 | } |
| 1114 | BUG_ON(info->dma_address != iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1115 | list_del(&info->list_node); |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1116 | --bam_rx_pool_len; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1117 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1118 | handle_bam_mux_cmd(&info->work); |
| 1119 | } |
| 1120 | return; |
| 1121 | |
| 1122 | fail: |
| 1123 | pr_err("%s: reverting to polling\n", __func__); |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 1124 | queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1125 | } |
| 1126 | |
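| | /* |
| |  * Polling-mode RX worker: drain the RX pipe, sleep between passes |
| |  * (scaling the interval to descriptor usage when the adaptive timer is |
| |  * enabled), and switch back to interrupt mode after POLLING_INACTIVITY |
| |  * idle cycles. |
| |  */ |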
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1127 | static void rx_timer_work_func(struct work_struct *work) |
| 1128 | { |
| 1129 | struct sps_iovec iov; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1130 | struct rx_pkt_info *info; |
| 1131 | int inactive_cycles = 0; |
| 1132 | int ret; |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame^] | 1133 | u32 buffs_unused, buffs_used; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1134 | |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1135 | while (bam_connection_is_active) { /* timer loop */ |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1136 | ++inactive_cycles; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1137 | while (bam_connection_is_active) { /* deplete queue loop */ |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1138 | if (in_global_reset) |
| 1139 | return; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1140 | |
| 1141 | ret = sps_get_iovec(bam_rx_pipe, &iov); |
| 1142 | if (ret) { |
| 1143 | pr_err("%s: sps_get_iovec failed %d\n", |
| 1144 | __func__, ret); |
| 1145 | break; |
| 1146 | } |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1147 | if (iov.addr == 0) |
| 1148 | break; |
| 1149 | inactive_cycles = 0; |
Jeff Hugo | c974993 | 2011-11-02 17:50:40 -0600 | [diff] [blame] | 1150 | mutex_lock(&bam_rx_pool_mutexlock); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1151 | if (unlikely(list_empty(&bam_rx_pool))) { |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1152 | DMUX_LOG_KERR( |
| 1153 | "%s: have iovec %p but rx pool empty\n", |
| 1154 | __func__, (void *)iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1155 | mutex_unlock(&bam_rx_pool_mutexlock); |
| 1156 | continue; |
| 1157 | } |
| 1158 | info = list_first_entry(&bam_rx_pool, |
| 1159 | struct rx_pkt_info, list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1160 | if (info->dma_address != iov.addr) { |
| 1161 | DMUX_LOG_KERR("%s: iovec %p != dma %p\n", |
| 1162 | __func__, |
| 1163 | (void *)iov.addr, |
| 1164 | (void *)info->dma_address); |
| 1165 | list_for_each_entry(info, &bam_rx_pool, |
| 1166 | list_node) { |
| 1167 | DMUX_LOG_KERR("%s: dma %p\n", __func__, |
| 1168 | (void *)info->dma_address); |
| 1169 | if (iov.addr == info->dma_address) |
| 1170 | break; |
| 1171 | } |
| 1172 | } |
| 1173 | BUG_ON(info->dma_address != iov.addr); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1174 | list_del(&info->list_node); |
Eric Holmberg | 00cf869 | 2012-07-16 14:21:19 -0600 | [diff] [blame] | 1175 | --bam_rx_pool_len; |
Jeff Hugo | c974993 | 2011-11-02 17:50:40 -0600 | [diff] [blame] | 1176 | mutex_unlock(&bam_rx_pool_mutexlock); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1177 | handle_bam_mux_cmd(&info->work); |
| 1178 | } |
| 1179 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame^] | 1180 | if (inactive_cycles >= POLLING_INACTIVITY) { |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1181 | rx_switch_to_interrupt_mode(); |
| 1182 | break; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1183 | } |
| 1184 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame^] | 1185 | if (bam_adaptive_timer_enabled) { |
| 1186 | usleep_range(rx_timer_interval, rx_timer_interval + 50); |
| 1187 | |
| 1188 | ret = sps_get_unused_desc_num(bam_rx_pipe, |
| 1189 | &buffs_unused); |
| 1190 | |
| 1191 | if (ret) { |
| 1192 | pr_err("%s: error getting num buffers unused after sleep\n", |
| 1193 | __func__); |
| 1194 | |
| 1195 | break; |
| 1196 | } |
| 1197 | |
| 1198 | buffs_used = NUM_BUFFERS - buffs_unused; |
| 1199 | |
| 1200 | if (buffs_unused == 0) { |
| 1201 | rx_timer_interval = MIN_POLLING_SLEEP; |
| 1202 | } else { |
| 1203 | if (buffs_used > 0) { |
| 1204 | rx_timer_interval = |
| 1205 | (2 * NUM_BUFFERS * |
| 1206 | rx_timer_interval)/ |
| 1207 | (3 * buffs_used); |
| 1208 | } else { |
| 1209 | rx_timer_interval = |
| 1210 | MAX_POLLING_SLEEP; |
| 1211 | } |
| 1212 | } |
| 1213 | |
| 1214 | if (rx_timer_interval > MAX_POLLING_SLEEP) |
| 1215 | rx_timer_interval = MAX_POLLING_SLEEP; |
| 1216 | else if (rx_timer_interval < MIN_POLLING_SLEEP) |
| 1217 | rx_timer_interval = MIN_POLLING_SLEEP; |
| 1218 | } else { |
| 1219 | usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP); |
| 1220 | } |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1221 | } |
| 1222 | } |
| 1223 | |
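| | /* |
| |  * TX completion callback from SPS: unmap the finished data or command |
| |  * buffer and queue the per-packet write-done work. |
| |  */ |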
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1224 | static void bam_mux_tx_notify(struct sps_event_notify *notify) |
| 1225 | { |
| 1226 | struct tx_pkt_info *pkt; |
| 1227 | |
| 1228 | DBG("%s: event %d notified\n", __func__, notify->event_id); |
| 1229 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1230 | if (in_global_reset) |
| 1231 | return; |
| 1232 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1233 | switch (notify->event_id) { |
| 1234 | case SPS_EVENT_EOT: |
| 1235 | pkt = notify->data.transfer.user; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1236 | if (!pkt->is_cmd) |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1237 | dma_unmap_single(NULL, pkt->dma_address, |
| 1238 | pkt->skb->len, |
| 1239 | DMA_TO_DEVICE); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1240 | else |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1241 | dma_unmap_single(NULL, pkt->dma_address, |
| 1242 | pkt->len, |
| 1243 | DMA_TO_DEVICE); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1244 | queue_work(bam_mux_tx_workqueue, &pkt->work); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1245 | break; |
| 1246 | default: |
| 1247 | 		pr_err("%s: received unexpected event id %d\n", __func__, |
| 1248 | notify->event_id); |
| 1249 | } |
| 1250 | } |
| 1251 | |
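| | /* |
| |  * RX EOT callback from SPS: mask the pipe interrupt by switching it to |
| |  * polling mode, grab the wakelock, and kick the polling worker on core 0. |
| |  */ |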
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1252 | static void bam_mux_rx_notify(struct sps_event_notify *notify) |
| 1253 | { |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1254 | int ret; |
| 1255 | struct sps_connect cur_rx_conn; |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1256 | |
| 1257 | DBG("%s: event %d notified\n", __func__, notify->event_id); |
| 1258 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1259 | if (in_global_reset) |
| 1260 | return; |
| 1261 | |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1262 | switch (notify->event_id) { |
| 1263 | case SPS_EVENT_EOT: |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1264 | /* attempt to disable interrupts in this pipe */ |
| 1265 | if (!polling_mode) { |
| 1266 | ret = sps_get_config(bam_rx_pipe, &cur_rx_conn); |
| 1267 | if (ret) { |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1268 | pr_err("%s: sps_get_config() failed %d, interrupts" |
| 1269 | " not disabled\n", __func__, ret); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1270 | break; |
| 1271 | } |
Jeff Hugo | a9d32ba | 2011-11-21 14:59:48 -0700 | [diff] [blame] | 1272 | cur_rx_conn.options = SPS_O_AUTO_ENABLE | |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1273 | SPS_O_ACK_TRANSFERS | SPS_O_POLL; |
| 1274 | ret = sps_set_config(bam_rx_pipe, &cur_rx_conn); |
| 1275 | if (ret) { |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1276 | pr_err("%s: sps_set_config() failed %d, interrupts" |
| 1277 | " not disabled\n", __func__, ret); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1278 | break; |
| 1279 | } |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1280 | grab_wakelock(); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1281 | polling_mode = 1; |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 1282 | /* |
| 1283 | * run on core 0 so that netif_rx() in rmnet uses only |
| 1284 | * one queue |
| 1285 | */ |
| 1286 | queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work); |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 1287 | } |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 1288 | break; |
| 1289 | default: |
| 1290 | 		pr_err("%s: received unexpected event id %d\n", __func__, |
| 1291 | notify->event_id); |
| 1292 | } |
| 1293 | } |
| 1294 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1295 | #ifdef CONFIG_DEBUG_FS |
| 1296 | |
| 1297 | static int debug_tbl(char *buf, int max) |
| 1298 | { |
| 1299 | int i = 0; |
| 1300 | int j; |
| 1301 | |
| 1302 | for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) { |
| 1303 | i += scnprintf(buf + i, max - i, |
| 1304 | "ch%02d local open=%s remote open=%s\n", |
| 1305 | j, bam_ch_is_local_open(j) ? "Y" : "N", |
| 1306 | bam_ch_is_remote_open(j) ? "Y" : "N"); |
| 1307 | } |
| 1308 | |
| 1309 | return i; |
| 1310 | } |
| 1311 | |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1312 | static int debug_ul_pkt_cnt(char *buf, int max) |
| 1313 | { |
| 1314 | struct list_head *p; |
| 1315 | unsigned long flags; |
| 1316 | int n = 0; |
| 1317 | |
| 1318 | spin_lock_irqsave(&bam_tx_pool_spinlock, flags); |
| 1319 | __list_for_each(p, &bam_tx_pool) { |
| 1320 | ++n; |
| 1321 | } |
| 1322 | spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags); |
| 1323 | |
| 1324 | return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n); |
| 1325 | } |
| 1326 | |
| 1327 | static int debug_stats(char *buf, int max) |
| 1328 | { |
| 1329 | int i = 0; |
| 1330 | |
| 1331 | i += scnprintf(buf + i, max - i, |
Eric Holmberg | 9fdef26 | 2012-02-14 11:46:05 -0700 | [diff] [blame] | 1332 | "skb read cnt: %u\n" |
| 1333 | "skb write cnt: %u\n" |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1334 | "skb copy cnt: %u\n" |
| 1335 | "skb copy bytes: %u\n" |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1336 | "sps tx failures: %u\n" |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1337 | "sps tx stalls: %u\n" |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 1338 | "rx queue len: %d\n" |
| 1339 | "a2 ack out cnt: %d\n" |
| 1340 | "a2 ack in cnt: %d\n" |
| 1341 | "a2 pwr cntl in: %d\n", |
Eric Holmberg | 9fdef26 | 2012-02-14 11:46:05 -0700 | [diff] [blame] | 1342 | bam_dmux_read_cnt, |
| 1343 | bam_dmux_write_cnt, |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1344 | bam_dmux_write_cpy_cnt, |
| 1345 | bam_dmux_write_cpy_bytes, |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1346 | bam_dmux_tx_sps_failure_cnt, |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1347 | bam_dmux_tx_stall_cnt, |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 1348 | bam_rx_pool_len, |
| 1349 | atomic_read(&bam_dmux_ack_out_cnt), |
| 1350 | atomic_read(&bam_dmux_ack_in_cnt), |
| 1351 | atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt) |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 1352 | ); |
| 1353 | |
| 1354 | return i; |
| 1355 | } |
| 1356 | |
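| | /* |
| |  * Copy entries from the state-transition log kfifo into the debugfs read |
| |  * buffer, emitting the flag legend on the first read. |
| |  */ |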
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1357 | static int debug_log(char *buff, int max, loff_t *ppos) |
| 1358 | { |
| 1359 | unsigned long flags; |
| 1360 | int i = 0; |
| 1361 | |
| 1362 | if (bam_dmux_state_logging_disabled) { |
| 1363 | 		i += scnprintf(buff + i, max - i, "Logging disabled\n"); |
| 1364 | return i; |
| 1365 | } |
| 1366 | |
| 1367 | if (*ppos == 0) { |
| 1368 | 		i += scnprintf(buff + i, max - i, |
| 1369 | "<DMUX> timestamp FLAGS [Message]\n" |
| 1370 | "FLAGS:\n" |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1371 | "\tD: 1 = Power collapse disabled\n" |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1372 | "\tR: 1 = in global reset\n" |
| 1373 | "\tP: 1 = BAM is powered up\n" |
| 1374 | "\tA: 1 = BAM initialized and ready for data\n" |
| 1375 | "\n" |
| 1376 | "\tV: 1 = Uplink vote for power\n" |
| 1377 | "\tU: 1 = Uplink active\n" |
| 1378 | "\tW: 1 = Uplink Wait-for-ack\n" |
| 1379 | "\tA: 1 = Uplink ACK received\n" |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1380 | "\t#: >=1 On-demand uplink vote\n" |
Jeff Hugo | 0b13a35 | 2012-03-17 23:18:30 -0600 | [diff] [blame] | 1381 | "\tD: 1 = Disconnect ACK active\n" |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1382 | ); |
| 1383 | buff += i; |
| 1384 | } |
| 1385 | |
| 1386 | spin_lock_irqsave(&bam_dmux_logging_spinlock, flags); |
| 1387 | while (kfifo_len(&bam_dmux_state_log) |
| 1388 | && (i + LOG_MESSAGE_MAX_SIZE) < max) { |
| 1389 | int k_len; |
| 1390 | k_len = kfifo_out(&bam_dmux_state_log, |
| 1391 | buff, LOG_MESSAGE_MAX_SIZE); |
| 1392 | if (k_len != LOG_MESSAGE_MAX_SIZE) { |
| 1393 | pr_err("%s: retrieve failure %d\n", __func__, k_len); |
| 1394 | break; |
| 1395 | } |
| 1396 | |
| 1397 | /* keep non-null portion of string and add line break */ |
| 1398 | k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE); |
| 1399 | buff += k_len; |
| 1400 | i += k_len; |
| 1401 | if (k_len && *(buff - 1) != '\n') { |
| 1402 | *buff++ = '\n'; |
| 1403 | ++i; |
| 1404 | } |
| 1405 | } |
| 1406 | spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags); |
| 1407 | |
| 1408 | return i; |
| 1409 | } |
| 1410 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1411 | #define DEBUG_BUFMAX 4096 |
| 1412 | static char debug_buffer[DEBUG_BUFMAX]; |
| 1413 | |
| 1414 | static ssize_t debug_read(struct file *file, char __user *buf, |
| 1415 | size_t count, loff_t *ppos) |
| 1416 | { |
| 1417 | int (*fill)(char *buf, int max) = file->private_data; |
| 1418 | int bsize = fill(debug_buffer, DEBUG_BUFMAX); |
| 1419 | return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize); |
| 1420 | } |
| 1421 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1422 | static ssize_t debug_read_multiple(struct file *file, char __user *buff, |
| 1423 | size_t count, loff_t *ppos) |
| 1424 | { |
| 1425 | int (*util_func)(char *buf, int max, loff_t *) = file->private_data; |
| 1426 | char *buffer; |
| 1427 | int bsize; |
| 1428 | |
| 1429 | buffer = kmalloc(count, GFP_KERNEL); |
| 1430 | if (!buffer) |
| 1431 | return -ENOMEM; |
| 1432 | |
| 1433 | bsize = util_func(buffer, count, ppos); |
| 1434 | |
| 1435 | if (bsize >= 0) { |
| 1436 | if (copy_to_user(buff, buffer, bsize)) { |
| 1437 | kfree(buffer); |
| 1438 | return -EFAULT; |
| 1439 | } |
| 1440 | *ppos += bsize; |
| 1441 | } |
| 1442 | kfree(buffer); |
| 1443 | return bsize; |
| 1444 | } |
| 1445 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1446 | static int debug_open(struct inode *inode, struct file *file) |
| 1447 | { |
| 1448 | file->private_data = inode->i_private; |
| 1449 | return 0; |
| 1450 | } |
| 1451 | |
| 1452 | |
| 1453 | static const struct file_operations debug_ops = { |
| 1454 | .read = debug_read, |
| 1455 | .open = debug_open, |
| 1456 | }; |
| 1457 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1458 | static const struct file_operations debug_ops_multiple = { |
| 1459 | .read = debug_read_multiple, |
| 1460 | .open = debug_open, |
| 1461 | }; |
| 1462 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1463 | static void debug_create(const char *name, mode_t mode, |
| 1464 | struct dentry *dent, |
| 1465 | int (*fill)(char *buf, int max)) |
| 1466 | { |
Eric Holmberg | e4ac80b | 2012-01-12 09:21:59 -0700 | [diff] [blame] | 1467 | struct dentry *file; |
| 1468 | |
| 1469 | file = debugfs_create_file(name, mode, dent, fill, &debug_ops); |
| 1470 | if (IS_ERR(file)) |
| 1471 | pr_err("%s: debugfs create failed %d\n", __func__, |
| 1472 | (int)PTR_ERR(file)); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1473 | } |
| 1474 | |
Eric Holmberg | e4ac80b | 2012-01-12 09:21:59 -0700 | [diff] [blame] | 1475 | static void debug_create_multiple(const char *name, mode_t mode, |
| 1476 | struct dentry *dent, |
| 1477 | int (*fill)(char *buf, int max, loff_t *ppos)) |
| 1478 | { |
| 1479 | struct dentry *file; |
| 1480 | |
| 1481 | file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple); |
| 1482 | if (IS_ERR(file)) |
| 1483 | pr_err("%s: debugfs create failed %d\n", __func__, |
| 1484 | (int)PTR_ERR(file)); |
| 1485 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1486 | #endif |
| 1487 | |
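| | /* |
| |  * Deliver a DMUX event to every open channel's notify callback and to |
| |  * all externally registered notifiers. |
| |  */ |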
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1488 | static void notify_all(int event, unsigned long data) |
| 1489 | { |
| 1490 | int i; |
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1491 | struct list_head *temp; |
| 1492 | struct outside_notify_func *func; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1493 | |
| 1494 | for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) { |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1495 | if (bam_ch_is_open(i)) { |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1496 | bam_ch[i].notify(bam_ch[i].priv, event, data); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1497 | bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n", |
| 1498 | __func__, i, event, data); |
| 1499 | } |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1500 | } |
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1501 | |
| 1502 | __list_for_each(temp, &bam_other_notify_funcs) { |
| 1503 | func = container_of(temp, struct outside_notify_func, |
| 1504 | list_node); |
| 1505 | func->notify(func->priv, event, data); |
| 1506 | } |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1507 | } |
| 1508 | |
| 1509 | static void kickoff_ul_wakeup_func(struct work_struct *work) |
| 1510 | { |
| 1511 | read_lock(&ul_wakeup_lock); |
| 1512 | if (!bam_is_connected) { |
| 1513 | read_unlock(&ul_wakeup_lock); |
| 1514 | ul_wakeup(); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1515 | if (unlikely(in_global_reset == 1)) |
| 1516 | return; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1517 | read_lock(&ul_wakeup_lock); |
| 1518 | ul_packet_written = 1; |
| 1519 | notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL)); |
| 1520 | } |
| 1521 | read_unlock(&ul_wakeup_lock); |
| 1522 | } |
| 1523 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1524 | int msm_bam_dmux_kickoff_ul_wakeup(void) |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1525 | { |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1526 | int is_connected; |
| 1527 | |
| 1528 | read_lock(&ul_wakeup_lock); |
| 1529 | ul_packet_written = 1; |
| 1530 | is_connected = bam_is_connected; |
| 1531 | if (!is_connected) |
| 1532 | queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup); |
| 1533 | read_unlock(&ul_wakeup_lock); |
| 1534 | |
| 1535 | return is_connected; |
Jeff Hugo | d98b108 | 2011-10-24 10:30:23 -0600 | [diff] [blame] | 1536 | } |
| 1537 | |
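| | /* |
| |  * Record the apps-side A2 power vote and mirror it into the |
| |  * SMSM_A2_POWER_CONTROL bit. |
| |  */ |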
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1538 | static void power_vote(int vote) |
| 1539 | { |
| 1540 | bam_dmux_log("%s: curr=%d, vote=%d\n", __func__, |
| 1541 | bam_dmux_uplink_vote, vote); |
| 1542 | |
| 1543 | if (bam_dmux_uplink_vote == vote) |
| 1544 | bam_dmux_log("%s: warning - duplicate power vote\n", __func__); |
| 1545 | |
| 1546 | bam_dmux_uplink_vote = vote; |
| 1547 | if (vote) |
| 1548 | smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL); |
| 1549 | else |
| 1550 | smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0); |
| 1551 | } |
| 1552 | |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1553 | /* |
| 1554 | * @note: Must be called with ul_wakeup_lock locked. |
| 1555 | */ |
| 1556 | static inline void ul_powerdown(void) |
| 1557 | { |
| 1558 | bam_dmux_log("%s: powerdown\n", __func__); |
| 1559 | verify_tx_queue_is_empty(__func__); |
| 1560 | |
| 1561 | if (a2_pc_disabled) { |
| 1562 | wait_for_dfab = 1; |
| 1563 | INIT_COMPLETION(dfab_unvote_completion); |
| 1564 | release_wakelock(); |
| 1565 | } else { |
| 1566 | wait_for_ack = 1; |
| 1567 | INIT_COMPLETION(ul_wakeup_ack_completion); |
| 1568 | power_vote(0); |
| 1569 | } |
| 1570 | bam_is_connected = 0; |
| 1571 | notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL)); |
| 1572 | } |
| 1573 | |
| 1574 | static inline void ul_powerdown_finish(void) |
| 1575 | { |
| 1576 | if (a2_pc_disabled && wait_for_dfab) { |
| 1577 | unvote_dfab(); |
| 1578 | complete_all(&dfab_unvote_completion); |
| 1579 | wait_for_dfab = 0; |
| 1580 | } |
| 1581 | } |
| 1582 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1583 | /* |
| 1584 | * Votes for UL power and returns current power state. |
| 1585 | * |
| 1586 | * @returns true if currently connected |
| 1587 | */ |
| 1588 | int msm_bam_dmux_ul_power_vote(void) |
| 1589 | { |
| 1590 | int is_connected; |
| 1591 | |
| 1592 | read_lock(&ul_wakeup_lock); |
| 1593 | atomic_inc(&ul_ondemand_vote); |
| 1594 | is_connected = bam_is_connected; |
| 1595 | if (!is_connected) |
| 1596 | queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup); |
| 1597 | read_unlock(&ul_wakeup_lock); |
| 1598 | |
| 1599 | return is_connected; |
| 1600 | } |
| 1601 | |
| 1602 | /* |
| 1603 | * Unvotes for UL power. |
| 1604 | * |
| 1605 | * @returns true if vote count is 0 (UL shutdown possible) |
| 1606 | */ |
| 1607 | int msm_bam_dmux_ul_power_unvote(void) |
| 1608 | { |
| 1609 | int vote; |
| 1610 | |
| 1611 | read_lock(&ul_wakeup_lock); |
| 1612 | vote = atomic_dec_return(&ul_ondemand_vote); |
| 1613 | 	if (unlikely(vote < 0)) |
| 1614 | DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote); |
| 1615 | read_unlock(&ul_wakeup_lock); |
| 1616 | |
| 1617 | return vote == 0; |
| 1618 | } |
| 1619 | |
Jeff Hugo | cb79802 | 2012-04-09 14:55:40 -0600 | [diff] [blame] | 1620 | int msm_bam_dmux_reg_notify(void *priv, |
| 1621 | void (*notify)(void *priv, int event_type, |
| 1622 | unsigned long data)) |
| 1623 | { |
| 1624 | struct outside_notify_func *func; |
| 1625 | |
| 1626 | if (!notify) |
| 1627 | return -EINVAL; |
| 1628 | |
| 1629 | func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL); |
| 1630 | if (!func) |
| 1631 | return -ENOMEM; |
| 1632 | |
| 1633 | func->notify = notify; |
| 1634 | func->priv = priv; |
| 1635 | list_add(&func->list_node, &bam_other_notify_funcs); |
| 1636 | |
| 1637 | return 0; |
| 1638 | } |
| 1639 | |
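| | /* |
| |  * Uplink inactivity timer: log a stalled TX queue, reschedule while |
| |  * packets are still being written or on-demand votes are held, otherwise |
| |  * power the uplink down. |
| |  */ |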
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1640 | static void ul_timeout(struct work_struct *work) |
| 1641 | { |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1642 | unsigned long flags; |
| 1643 | int ret; |
| 1644 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1645 | if (in_global_reset) |
| 1646 | return; |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1647 | ret = write_trylock_irqsave(&ul_wakeup_lock, flags); |
| 1648 | if (!ret) { /* failed to grab lock, reschedule and bail */ |
| 1649 | schedule_delayed_work(&ul_timeout_work, |
| 1650 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1651 | return; |
| 1652 | } |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1653 | if (bam_is_connected) { |
Eric Holmberg | 6074aba | 2012-01-18 17:59:44 -0700 | [diff] [blame] | 1654 | if (!ul_packet_written) { |
| 1655 | spin_lock(&bam_tx_pool_spinlock); |
| 1656 | if (!list_empty(&bam_tx_pool)) { |
| 1657 | struct tx_pkt_info *info; |
| 1658 | |
| 1659 | info = list_first_entry(&bam_tx_pool, |
| 1660 | struct tx_pkt_info, list_node); |
| 1661 | DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n", |
| 1662 | __func__, info->ts_sec, info->ts_nsec); |
| 1663 | DBG_INC_TX_STALL_CNT(); |
| 1664 | ul_packet_written = 1; |
| 1665 | } |
| 1666 | spin_unlock(&bam_tx_pool_spinlock); |
| 1667 | } |
| 1668 | |
Eric Holmberg | bc9f21c | 2012-01-18 11:33:33 -0700 | [diff] [blame] | 1669 | if (ul_packet_written || atomic_read(&ul_ondemand_vote)) { |
| 1670 | bam_dmux_log("%s: pkt written %d\n", |
| 1671 | __func__, ul_packet_written); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1672 | ul_packet_written = 0; |
| 1673 | schedule_delayed_work(&ul_timeout_work, |
| 1674 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1675 | } else { |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1676 | ul_powerdown(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1677 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1678 | } |
Jeff Hugo | c040a5b | 2011-11-15 14:26:01 -0700 | [diff] [blame] | 1679 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1680 | ul_powerdown_finish(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1681 | } |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1682 | |
| 1683 | static int ssrestart_check(void) |
| 1684 | { |
Eric Holmberg | 90285e2 | 2012-02-22 12:33:05 -0700 | [diff] [blame] | 1685 | DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__); |
| 1686 | in_global_reset = 1; |
| 1687 | if (get_restart_level() <= RESET_SOC) |
| 1688 | DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__); |
| 1689 | return 1; |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1690 | } |
| 1691 | |
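| | /* |
| |  * Bring the uplink up: defer with a flag if the BAM is not yet |
| |  * initialized, take the shortcut when A2 power collapse is disabled, |
| |  * otherwise wait out any pending power-down ack, vote for A2 power, and |
| |  * wait (with SSR checks on timeout) for the wakeup ack and connection. |
| |  */ |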
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1692 | static void ul_wakeup(void) |
| 1693 | { |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1694 | int ret; |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1695 | int do_vote_dfab = 0; |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1696 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1697 | mutex_lock(&wakeup_lock); |
| 1698 | if (bam_is_connected) { /* bam got connected before lock grabbed */ |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1699 | bam_dmux_log("%s Already awake\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1700 | mutex_unlock(&wakeup_lock); |
| 1701 | return; |
| 1702 | } |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1703 | |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 1704 | /* |
| 1705 | 	 * if someone votes for UL before the BAM is initialized (modem up for |
| 1706 | 	 * the first time), set a flag so init kicks off the UL wakeup once ready |
| 1707 | */ |
| 1708 | mutex_lock(&delayed_ul_vote_lock); |
| 1709 | if (unlikely(!bam_mux_initialized)) { |
| 1710 | need_delayed_ul_vote = 1; |
| 1711 | mutex_unlock(&delayed_ul_vote_lock); |
| 1712 | mutex_unlock(&wakeup_lock); |
| 1713 | return; |
| 1714 | } |
| 1715 | mutex_unlock(&delayed_ul_vote_lock); |
| 1716 | |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1717 | if (a2_pc_disabled) { |
| 1718 | /* |
| 1719 | * don't grab the wakelock the first time because it is |
| 1720 | * already grabbed when a2 powers on |
| 1721 | */ |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1722 | if (likely(a2_pc_disabled_wakelock_skipped)) { |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1723 | grab_wakelock(); |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1724 | do_vote_dfab = 1; /* vote must occur after wait */ |
| 1725 | } else { |
Jeff Hugo | 583a6da | 2012-02-03 11:37:30 -0700 | [diff] [blame] | 1726 | a2_pc_disabled_wakelock_skipped = 1; |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1727 | } |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1728 | if (wait_for_dfab) { |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1729 | ret = wait_for_completion_timeout( |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1730 | &dfab_unvote_completion, HZ); |
| 1731 | BUG_ON(ret == 0); |
| 1732 | } |
Jeff Hugo | 5f57ec9 | 2012-05-14 13:34:28 -0600 | [diff] [blame] | 1733 | if (likely(do_vote_dfab)) |
| 1734 | vote_dfab(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1735 | schedule_delayed_work(&ul_timeout_work, |
| 1736 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1737 | bam_is_connected = 1; |
| 1738 | mutex_unlock(&wakeup_lock); |
| 1739 | return; |
| 1740 | } |
| 1741 | |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1742 | /* |
| 1743 | 	 * must wait for the previous power-down request to have been acked; |
| 1744 | * chances are it already came in and this will just fall through |
| 1745 | * instead of waiting |
| 1746 | */ |
| 1747 | if (wait_for_ack) { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1748 | bam_dmux_log("%s waiting for previous ack\n", __func__); |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1749 | ret = wait_for_completion_timeout( |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1750 | &ul_wakeup_ack_completion, HZ); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1751 | wait_for_ack = 0; |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1752 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1753 | mutex_unlock(&wakeup_lock); |
| 1754 | bam_dmux_log("%s timeout previous ack\n", __func__); |
| 1755 | return; |
| 1756 | } |
Jeff Hugo | f6c1c1e | 2011-12-01 17:43:49 -0700 | [diff] [blame] | 1757 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1758 | INIT_COMPLETION(ul_wakeup_ack_completion); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1759 | power_vote(1); |
| 1760 | bam_dmux_log("%s waiting for wakeup ack\n", __func__); |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1761 | ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1762 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1763 | mutex_unlock(&wakeup_lock); |
| 1764 | bam_dmux_log("%s timeout wakeup ack\n", __func__); |
| 1765 | return; |
| 1766 | } |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1767 | bam_dmux_log("%s waiting completion\n", __func__); |
Jeff Hugo | 66f7f1e | 2012-01-16 14:30:42 -0700 | [diff] [blame] | 1768 | ret = wait_for_completion_timeout(&bam_connection_completion, HZ); |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1769 | if (unlikely(ret == 0) && ssrestart_check()) { |
| 1770 | mutex_unlock(&wakeup_lock); |
| 1771 | bam_dmux_log("%s timeout power on\n", __func__); |
| 1772 | return; |
| 1773 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1774 | |
| 1775 | bam_is_connected = 1; |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1776 | bam_dmux_log("%s complete\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1777 | schedule_delayed_work(&ul_timeout_work, |
| 1778 | msecs_to_jiffies(UL_TIMEOUT_DELAY)); |
| 1779 | mutex_unlock(&wakeup_lock); |
| 1780 | } |
| 1781 | |
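| | /* |
| |  * Restore the connection after remote power-up: re-vote dfab, reset the |
| |  * device and reconnect both pipes (skipped in power-management-only |
| |  * mode), leave polling mode if needed, ack the remote side, and refill |
| |  * the RX queue. |
| |  */ |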
| 1782 | static void reconnect_to_bam(void) |
| 1783 | { |
| 1784 | int i; |
| 1785 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1786 | in_global_reset = 0; |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1787 | vote_dfab(); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1788 | if (!power_management_only_mode) { |
| 1789 | i = sps_device_reset(a2_device_handle); |
| 1790 | if (i) |
| 1791 | pr_err("%s: device reset failed rc = %d\n", __func__, |
| 1792 | i); |
| 1793 | i = sps_connect(bam_tx_pipe, &tx_connection); |
| 1794 | if (i) |
| 1795 | pr_err("%s: tx connection failed rc = %d\n", __func__, |
| 1796 | i); |
| 1797 | i = sps_connect(bam_rx_pipe, &rx_connection); |
| 1798 | if (i) |
| 1799 | pr_err("%s: rx connection failed rc = %d\n", __func__, |
| 1800 | i); |
| 1801 | i = sps_register_event(bam_tx_pipe, &tx_register_event); |
| 1802 | if (i) |
| 1803 | pr_err("%s: tx event reg failed rc = %d\n", __func__, |
| 1804 | i); |
| 1805 | i = sps_register_event(bam_rx_pipe, &rx_register_event); |
| 1806 | if (i) |
| 1807 | pr_err("%s: rx event reg failed rc = %d\n", __func__, |
| 1808 | i); |
| 1809 | } |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1810 | |
| 1811 | bam_connection_is_active = 1; |
| 1812 | |
| 1813 | if (polling_mode) |
| 1814 | rx_switch_to_interrupt_mode(); |
| 1815 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1816 | toggle_apps_ack(); |
| 1817 | complete_all(&bam_connection_completion); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1818 | if (!power_management_only_mode) |
| 1819 | queue_rx(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1820 | } |
| 1821 | |
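| | /* |
| |  * Tear the connection down on remote power-down: force the uplink off, |
| |  * disconnect both pipes, free every queued RX buffer, and ack the remote |
| |  * side if a disconnect ack is pending. |
| |  */ |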
| 1822 | static void disconnect_to_bam(void) |
| 1823 | { |
| 1824 | struct list_head *node; |
| 1825 | struct rx_pkt_info *info; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1826 | unsigned long flags; |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1827 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1828 | bam_connection_is_active = 0; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1829 | |
| 1830 | /* handle disconnect during active UL */ |
| 1831 | write_lock_irqsave(&ul_wakeup_lock, flags); |
| 1832 | if (bam_is_connected) { |
| 1833 | bam_dmux_log("%s: UL active - forcing powerdown\n", __func__); |
| 1834 | ul_powerdown(); |
| 1835 | } |
| 1836 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
| 1837 | ul_powerdown_finish(); |
| 1838 | |
| 1839 | /* tear down BAM connection */ |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1840 | INIT_COMPLETION(bam_connection_completion); |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 1841 | if (!power_management_only_mode) { |
| 1842 | sps_disconnect(bam_tx_pipe); |
| 1843 | sps_disconnect(bam_rx_pipe); |
| 1844 | __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size); |
| 1845 | __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size); |
| 1846 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1847 | unvote_dfab(); |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1848 | |
| 1849 | mutex_lock(&bam_rx_pool_mutexlock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1850 | while (!list_empty(&bam_rx_pool)) { |
| 1851 | node = bam_rx_pool.next; |
| 1852 | list_del(node); |
| 1853 | info = container_of(node, struct rx_pkt_info, list_node); |
| 1854 | dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, |
| 1855 | DMA_FROM_DEVICE); |
| 1856 | dev_kfree_skb_any(info->skb); |
| 1857 | kfree(info); |
| 1858 | } |
Eric Holmberg | b5b08e5 | 2012-01-20 14:19:00 -0700 | [diff] [blame] | 1859 | bam_rx_pool_len = 0; |
Eric Holmberg | 8df0cdb | 2012-01-04 17:40:46 -0700 | [diff] [blame] | 1860 | mutex_unlock(&bam_rx_pool_mutexlock); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1861 | |
Jeff Hugo | 0b13a35 | 2012-03-17 23:18:30 -0600 | [diff] [blame] | 1862 | if (disconnect_ack) |
| 1863 | toggle_apps_ack(); |
| 1864 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1865 | verify_tx_queue_is_empty(__func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1866 | } |
| 1867 | |
| 1868 | static void vote_dfab(void) |
| 1869 | { |
Jeff Hugo | ca0caa8 | 2011-12-05 16:05:23 -0700 | [diff] [blame] | 1870 | int rc; |
| 1871 | |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1872 | bam_dmux_log("%s\n", __func__); |
| 1873 | mutex_lock(&dfab_status_lock); |
| 1874 | if (dfab_is_on) { |
| 1875 | bam_dmux_log("%s: dfab is already on\n", __func__); |
| 1876 | mutex_unlock(&dfab_status_lock); |
| 1877 | return; |
| 1878 | } |
Jeff Hugo | 23a812b | 2012-01-13 13:43:42 -0700 | [diff] [blame] | 1879 | rc = clk_prepare_enable(dfab_clk); |
Jeff Hugo | ca0caa8 | 2011-12-05 16:05:23 -0700 | [diff] [blame] | 1880 | if (rc) |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1881 | DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc); |
Stephen Boyd | 69d35e3 | 2012-02-14 15:33:30 -0800 | [diff] [blame] | 1882 | rc = clk_prepare_enable(xo_clk); |
| 1883 | if (rc) |
| 1884 | DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1885 | dfab_is_on = 1; |
| 1886 | mutex_unlock(&dfab_status_lock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1887 | } |
| 1888 | |
| 1889 | static void unvote_dfab(void) |
| 1890 | { |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1891 | bam_dmux_log("%s\n", __func__); |
| 1892 | mutex_lock(&dfab_status_lock); |
| 1893 | if (!dfab_is_on) { |
| 1894 | DMUX_LOG_KERR("%s: dfab is already off\n", __func__); |
| 1895 | dump_stack(); |
| 1896 | mutex_unlock(&dfab_status_lock); |
| 1897 | return; |
| 1898 | } |
Jeff Hugo | 23a812b | 2012-01-13 13:43:42 -0700 | [diff] [blame] | 1899 | clk_disable_unprepare(dfab_clk); |
Stephen Boyd | 69d35e3 | 2012-02-14 15:33:30 -0800 | [diff] [blame] | 1900 | clk_disable_unprepare(xo_clk); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1901 | dfab_is_on = 0; |
| 1902 | mutex_unlock(&dfab_status_lock); |
| 1903 | } |
| 1904 | |
| 1905 | /* reference counting wrapper around wakelock */ |
| 1906 | static void grab_wakelock(void) |
| 1907 | { |
| 1908 | unsigned long flags; |
| 1909 | |
| 1910 | spin_lock_irqsave(&wakelock_reference_lock, flags); |
| 1911 | bam_dmux_log("%s: ref count = %d\n", __func__, |
| 1912 | wakelock_reference_count); |
| 1913 | if (wakelock_reference_count == 0) |
| 1914 | wake_lock(&bam_wakelock); |
| 1915 | ++wakelock_reference_count; |
| 1916 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
| 1917 | } |
| 1918 | |
| 1919 | static void release_wakelock(void) |
| 1920 | { |
| 1921 | unsigned long flags; |
| 1922 | |
| 1923 | spin_lock_irqsave(&wakelock_reference_lock, flags); |
| 1924 | if (wakelock_reference_count == 0) { |
| 1925 | DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__); |
| 1926 | dump_stack(); |
| 1927 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
| 1928 | return; |
| 1929 | } |
| 1930 | bam_dmux_log("%s: ref count = %d\n", __func__, |
| 1931 | wakelock_reference_count); |
| 1932 | --wakelock_reference_count; |
| 1933 | if (wakelock_reference_count == 0) |
| 1934 | wake_unlock(&bam_wakelock); |
| 1935 | spin_unlock_irqrestore(&wakelock_reference_lock, flags); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 1936 | } |
| 1937 | |
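| | /* |
| |  * Modem SSR notifier: after shutdown, mark global reset, force the |
| |  * uplink down, reset per-channel state (re-allocating platform devices |
| |  * for remotely open channels), and free all pending TX packets. |
| |  */ |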
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1938 | static int restart_notifier_cb(struct notifier_block *this, |
| 1939 | unsigned long code, |
| 1940 | void *data) |
| 1941 | { |
| 1942 | int i; |
| 1943 | struct list_head *node; |
| 1944 | struct tx_pkt_info *info; |
| 1945 | int temp_remote_status; |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 1946 | unsigned long flags; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1947 | |
| 1948 | if (code != SUBSYS_AFTER_SHUTDOWN) |
| 1949 | return NOTIFY_DONE; |
| 1950 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 1951 | bam_dmux_log("%s: begin\n", __func__); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1952 | in_global_reset = 1; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1953 | |
| 1954 | /* Handle uplink Powerdown */ |
| 1955 | write_lock_irqsave(&ul_wakeup_lock, flags); |
| 1956 | if (bam_is_connected) { |
| 1957 | ul_powerdown(); |
| 1958 | wait_for_ack = 0; |
| 1959 | } |
Jeff Hugo | 4838f41 | 2012-01-20 11:19:37 -0700 | [diff] [blame] | 1960 | /* |
| 1961 | 	 * if the modem crashed during ul_wakeup(), the power vote is still 1 and |
| 1962 | 	 * needs to be reset to 0; harmless if the bam_is_connected check above passes |
| 1963 | */ |
| 1964 | power_vote(0); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1965 | write_unlock_irqrestore(&ul_wakeup_lock, flags); |
| 1966 | ul_powerdown_finish(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 1967 | a2_pc_disabled = 0; |
Jeff Hugo | 583a6da | 2012-02-03 11:37:30 -0700 | [diff] [blame] | 1968 | a2_pc_disabled_wakelock_skipped = 0; |
Jeff Hugo | f62029d | 2012-07-17 13:39:53 -0600 | [diff] [blame] | 1969 | disconnect_ack = 1; |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1970 | |
| 1971 | /* Cleanup Channel States */ |
Eric Holmberg | a623da8 | 2012-07-12 09:37:09 -0600 | [diff] [blame] | 1972 | mutex_lock(&bam_pdev_mutexlock); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1973 | for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) { |
| 1974 | temp_remote_status = bam_ch_is_remote_open(i); |
| 1975 | bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN; |
Karthikeyan Ramasubramanian | 7bf5ca8 | 2011-11-21 13:33:19 -0700 | [diff] [blame] | 1976 | bam_ch[i].num_tx_pkts = 0; |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1977 | if (bam_ch_is_local_open(i)) |
| 1978 | bam_ch[i].status |= BAM_CH_IN_RESET; |
| 1979 | if (temp_remote_status) { |
| 1980 | platform_device_unregister(bam_ch[i].pdev); |
| 1981 | bam_ch[i].pdev = platform_device_alloc( |
| 1982 | bam_ch[i].name, 2); |
| 1983 | } |
| 1984 | } |
Eric Holmberg | a623da8 | 2012-07-12 09:37:09 -0600 | [diff] [blame] | 1985 | mutex_unlock(&bam_pdev_mutexlock); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 1986 | |
| 1987 | /* Cleanup pending UL data */ |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 1988 | spin_lock_irqsave(&bam_tx_pool_spinlock, flags); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 1989 | while (!list_empty(&bam_tx_pool)) { |
| 1990 | node = bam_tx_pool.next; |
| 1991 | list_del(node); |
| 1992 | info = container_of(node, struct tx_pkt_info, |
| 1993 | list_node); |
| 1994 | if (!info->is_cmd) { |
| 1995 | dma_unmap_single(NULL, info->dma_address, |
| 1996 | info->skb->len, |
| 1997 | DMA_TO_DEVICE); |
| 1998 | dev_kfree_skb_any(info->skb); |
| 1999 | } else { |
| 2000 | dma_unmap_single(NULL, info->dma_address, |
| 2001 | info->len, |
| 2002 | DMA_TO_DEVICE); |
| 2003 | kfree(info->skb); |
| 2004 | } |
| 2005 | kfree(info); |
| 2006 | } |
Jeff Hugo | 626303bf | 2011-11-21 11:43:28 -0700 | [diff] [blame] | 2007 | spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags); |
Eric Holmberg | 454d9da | 2012-01-12 09:37:14 -0700 | [diff] [blame] | 2008 | |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2009 | bam_dmux_log("%s: complete\n", __func__); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2010 | return NOTIFY_DONE; |
| 2011 | } |
| 2012 | |
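| | /* |
| |  * Full BAM bring-up: map and register the A2 BAM, allocate and connect |
| |  * the TX and RX pipes with their descriptor FIFOs, register EOT events, |
| |  * then mark the mux initialized, ack the remote side, and start RX. |
| |  */ |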
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2013 | static int bam_init(void) |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2014 | { |
| 2015 | u32 h; |
| 2016 | dma_addr_t dma_addr; |
| 2017 | int ret; |
| 2018 | void *a2_virt_addr; |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2019 | int skip_iounmap = 0; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2020 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2021 | vote_dfab(); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2022 | /* init BAM */ |
| 2023 | a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE); |
| 2024 | if (!a2_virt_addr) { |
| 2025 | pr_err("%s: ioremap failed\n", __func__); |
| 2026 | ret = -ENOMEM; |
Jeff Hugo | 994a92d | 2012-01-05 13:25:21 -0700 | [diff] [blame] | 2027 | goto ioremap_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2028 | } |
| 2029 | a2_props.phys_addr = A2_PHYS_BASE; |
| 2030 | a2_props.virt_addr = a2_virt_addr; |
| 2031 | a2_props.virt_size = A2_PHYS_SIZE; |
| 2032 | a2_props.irq = A2_BAM_IRQ; |
Jeff Hugo | 927cba6 | 2011-11-11 11:49:52 -0700 | [diff] [blame] | 2033 | a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2034 | a2_props.num_pipes = A2_NUM_PIPES; |
| 2035 | a2_props.summing_threshold = A2_SUMMING_THRESHOLD; |
Jeff Hugo | 75913c8 | 2011-12-05 15:59:01 -0700 | [diff] [blame] | 2036 | if (cpu_is_msm9615()) |
| 2037 | a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2038 | /* need to free on tear down */ |
| 2039 | ret = sps_register_bam_device(&a2_props, &h); |
| 2040 | if (ret < 0) { |
| 2041 | pr_err("%s: register bam error %d\n", __func__, ret); |
| 2042 | goto register_bam_failed; |
| 2043 | } |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2044 | a2_device_handle = h; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2045 | |
| 2046 | bam_tx_pipe = sps_alloc_endpoint(); |
| 2047 | if (bam_tx_pipe == NULL) { |
| 2048 | pr_err("%s: tx alloc endpoint failed\n", __func__); |
| 2049 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2050 | goto tx_alloc_endpoint_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2051 | } |
| 2052 | ret = sps_get_config(bam_tx_pipe, &tx_connection); |
| 2053 | if (ret) { |
| 2054 | pr_err("%s: tx get config failed %d\n", __func__, ret); |
| 2055 | goto tx_get_config_failed; |
| 2056 | } |
| 2057 | |
| 2058 | tx_connection.source = SPS_DEV_HANDLE_MEM; |
| 2059 | tx_connection.src_pipe_index = 0; |
| 2060 | tx_connection.destination = h; |
| 2061 | tx_connection.dest_pipe_index = 4; |
| 2062 | tx_connection.mode = SPS_MODE_DEST; |
| 2063 | tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT; |
| 2064 | tx_desc_mem_buf.size = 0x800; /* 2k */ |
| 2065 | tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size, |
| 2066 | &dma_addr, 0); |
| 2067 | if (tx_desc_mem_buf.base == NULL) { |
| 2068 | pr_err("%s: tx memory alloc failed\n", __func__); |
| 2069 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2070 | goto tx_get_config_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2071 | } |
| 2072 | tx_desc_mem_buf.phys_base = dma_addr; |
| 2073 | memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size); |
| 2074 | tx_connection.desc = tx_desc_mem_buf; |
| 2075 | tx_connection.event_thresh = 0x10; |
| 2076 | |
| 2077 | ret = sps_connect(bam_tx_pipe, &tx_connection); |
| 2078 | if (ret < 0) { |
| 2079 | pr_err("%s: tx connect error %d\n", __func__, ret); |
| 2080 | goto tx_connect_failed; |
| 2081 | } |
| 2082 | |
| 2083 | bam_rx_pipe = sps_alloc_endpoint(); |
| 2084 | if (bam_rx_pipe == NULL) { |
| 2085 | pr_err("%s: rx alloc endpoint failed\n", __func__); |
| 2086 | ret = -ENOMEM; |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2087 | goto rx_alloc_endpoint_failed; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2088 | } |
| 2089 | ret = sps_get_config(bam_rx_pipe, &rx_connection); |
| 2090 | if (ret) { |
| 2091 | pr_err("%s: rx get config failed %d\n", __func__, ret); |
| 2092 | goto rx_get_config_failed; |
| 2093 | } |
| 2094 | |
| 2095 | rx_connection.source = h; |
| 2096 | rx_connection.src_pipe_index = 5; |
| 2097 | rx_connection.destination = SPS_DEV_HANDLE_MEM; |
| 2098 | rx_connection.dest_pipe_index = 1; |
| 2099 | rx_connection.mode = SPS_MODE_SRC; |
Jeff Hugo | 949080a | 2011-08-30 11:58:56 -0600 | [diff] [blame] | 2100 | rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | |
| 2101 | SPS_O_ACK_TRANSFERS; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2102 | rx_desc_mem_buf.size = 0x800; /* 2k */ |
| 2103 | rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size, |
| 2104 | &dma_addr, 0); |
| 2105 | if (rx_desc_mem_buf.base == NULL) { |
| 2106 | pr_err("%s: rx memory alloc failed\n", __func__); |
| 2107 | ret = -ENOMEM; |
| 2108 | goto rx_mem_failed; |
| 2109 | } |
| 2110 | rx_desc_mem_buf.phys_base = dma_addr; |
| 2111 | memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size); |
| 2112 | rx_connection.desc = rx_desc_mem_buf; |
| 2113 | rx_connection.event_thresh = 0x10; |
| 2114 | |
| 2115 | ret = sps_connect(bam_rx_pipe, &rx_connection); |
| 2116 | if (ret < 0) { |
| 2117 | pr_err("%s: rx connect error %d\n", __func__, ret); |
| 2118 | goto rx_connect_failed; |
| 2119 | } |
| 2120 | |
| 2121 | tx_register_event.options = SPS_O_EOT; |
| 2122 | tx_register_event.mode = SPS_TRIGGER_CALLBACK; |
| 2123 | tx_register_event.xfer_done = NULL; |
| 2124 | tx_register_event.callback = bam_mux_tx_notify; |
| 2125 | tx_register_event.user = NULL; |
| 2126 | ret = sps_register_event(bam_tx_pipe, &tx_register_event); |
| 2127 | if (ret < 0) { |
| 2128 | pr_err("%s: tx register event error %d\n", __func__, ret); |
| 2129 | goto rx_event_reg_failed; |
| 2130 | } |
| 2131 | |
Jeff Hugo | 33dbc00 | 2011-08-25 15:52:53 -0600 | [diff] [blame] | 2132 | rx_register_event.options = SPS_O_EOT; |
| 2133 | rx_register_event.mode = SPS_TRIGGER_CALLBACK; |
| 2134 | rx_register_event.xfer_done = NULL; |
| 2135 | rx_register_event.callback = bam_mux_rx_notify; |
| 2136 | rx_register_event.user = NULL; |
| 2137 | ret = sps_register_event(bam_rx_pipe, &rx_register_event); |
| 2138 | if (ret < 0) { |
| 2139 | 		pr_err("%s: rx register event error %d\n", __func__, ret); |
| 2140 | goto rx_event_reg_failed; |
| 2141 | } |
| 2142 | |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2143 | mutex_lock(&delayed_ul_vote_lock); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2144 | bam_mux_initialized = 1; |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2145 | if (need_delayed_ul_vote) { |
| 2146 | need_delayed_ul_vote = 0; |
| 2147 | msm_bam_dmux_kickoff_ul_wakeup(); |
| 2148 | } |
| 2149 | mutex_unlock(&delayed_ul_vote_lock); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2150 | toggle_apps_ack(); |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2151 | bam_connection_is_active = 1; |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2152 | complete_all(&bam_connection_completion); |
Jeff Hugo | 2fb555e | 2012-03-14 16:33:47 -0600 | [diff] [blame] | 2153 | queue_rx(); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2154 | return 0; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2155 | |
| 2156 | rx_event_reg_failed: |
| 2157 | sps_disconnect(bam_rx_pipe); |
| 2158 | rx_connect_failed: |
| 2159 | dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base, |
| 2160 | rx_desc_mem_buf.phys_base); |
| 2161 | rx_mem_failed: |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2162 | rx_get_config_failed: |
| 2163 | sps_free_endpoint(bam_rx_pipe); |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2164 | rx_alloc_endpoint_failed: |
| 2165 | sps_disconnect(bam_tx_pipe); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2166 | tx_connect_failed: |
| 2167 | dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base, |
| 2168 | tx_desc_mem_buf.phys_base); |
| 2169 | tx_get_config_failed: |
| 2170 | sps_free_endpoint(bam_tx_pipe); |
Jeff Hugo | 8ff4a81 | 2012-01-17 11:03:13 -0700 | [diff] [blame] | 2171 | tx_alloc_endpoint_failed: |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2172 | sps_deregister_bam_device(h); |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2173 | /* |
| 2174 | * sps_deregister_bam_device() calls iounmap. calling iounmap on the |
| 2175 | * same handle below will cause a crash, so skip it if we've freed |
| 2176 | * the handle here. |
| 2177 | */ |
| 2178 | skip_iounmap = 1; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2179 | register_bam_failed: |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2180 | if (!skip_iounmap) |
| 2181 | iounmap(a2_virt_addr); |
Jeff Hugo | 994a92d | 2012-01-05 13:25:21 -0700 | [diff] [blame] | 2182 | ioremap_failed: |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2183 | /*destroy_workqueue(bam_mux_workqueue);*/ |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2184 | return ret; |
| 2185 | } |
| 2186 | |
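 |      | /*
 |      |  * Minimal BAM bring-up used when full pipe setup fails: register the
 |      |  * A2 BAM device and run in power-management-only mode so the A2
 |      |  * power-control handshake can still be serviced without data pipes.
 |      |  */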
| 2187 | static int bam_init_fallback(void) |
| 2188 | { |
| 2189 | u32 h; |
| 2190 | int ret; |
| 2191 | void *a2_virt_addr; |
| 2192 | |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2193 | /* init BAM */ |
| 2194 | a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE); |
| 2195 | if (!a2_virt_addr) { |
| 2196 | pr_err("%s: ioremap failed\n", __func__); |
| 2197 | ret = -ENOMEM; |
| 2198 | goto ioremap_failed; |
| 2199 | } |
| 2200 | a2_props.phys_addr = A2_PHYS_BASE; |
| 2201 | a2_props.virt_addr = a2_virt_addr; |
| 2202 | a2_props.virt_size = A2_PHYS_SIZE; |
| 2203 | a2_props.irq = A2_BAM_IRQ; |
| 2204 | a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP; |
| 2205 | a2_props.num_pipes = A2_NUM_PIPES; |
| 2206 | a2_props.summing_threshold = A2_SUMMING_THRESHOLD; |
| 2207 | if (cpu_is_msm9615()) |
| 2208 | a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; |
| 2209 | ret = sps_register_bam_device(&a2_props, &h); |
| 2210 | if (ret < 0) { |
| 2211 | pr_err("%s: register bam error %d\n", __func__, ret); |
| 2212 | goto register_bam_failed; |
| 2213 | } |
| 2214 | a2_device_handle = h; |
Jeff Hugo | c269614 | 2012-05-03 11:42:13 -0600 | [diff] [blame] | 2215 | |
| 2216 | mutex_lock(&delayed_ul_vote_lock); |
| 2217 | bam_mux_initialized = 1; |
| 2218 | if (need_delayed_ul_vote) { |
| 2219 | need_delayed_ul_vote = 0; |
| 2220 | msm_bam_dmux_kickoff_ul_wakeup(); |
| 2221 | } |
| 2222 | mutex_unlock(&delayed_ul_vote_lock); |
Jeff Hugo | 2bec977 | 2012-04-05 12:25:16 -0600 | [diff] [blame] | 2223 | toggle_apps_ack(); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2224 | |
Jeff Hugo | 18792a3 | 2012-06-20 15:25:55 -0600 | [diff] [blame] | 2225 | power_management_only_mode = 1; |
| 2226 | bam_connection_is_active = 1; |
| 2227 | complete_all(&bam_connection_completion); |
| 2228 | |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2229 | return 0; |
| 2230 | |
| 2231 | register_bam_failed: |
Jeff Hugo | 4b2890d | 2012-01-16 16:14:21 -0700 | [diff] [blame] | 2232 | iounmap(a2_virt_addr); |
Jeff Hugo | 9dea05c | 2011-12-21 12:23:05 -0700 | [diff] [blame] | 2233 | ioremap_failed: |
| 2234 | return ret; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2235 | } |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2236 | |
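 |      | /* On 9615 targets, fall back to power-management-only mode if full init fails. */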
Jeff Hugo | a670b76 | 2012-03-15 15:58:28 -0600 | [diff] [blame] | 2237 | static void msm9615_bam_init(void) |
Eric Holmberg | 604ab25 | 2012-01-15 00:01:18 -0700 | [diff] [blame] | 2238 | { |
| 2239 | int ret = 0; |
| 2240 | |
| 2241 | ret = bam_init(); |
| 2242 | if (ret) { |
| 2243 | ret = bam_init_fallback(); |
| 2244 | if (ret) |
| 2245 | 			pr_err("%s: bam init fallback failed: %d\n",
| 2246 | __func__, ret); |
| 2247 | } |
| 2248 | } |
| 2249 | |
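 |      | /*
 |      |  * Acknowledge the latest A2 power-control request by toggling the
 |      |  * SMSM_A2_POWER_CONTROL_ACK bit in the apps SMSM state.
 |      |  */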
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2250 | static void toggle_apps_ack(void) |
| 2251 | { |
| 2252 | static unsigned int clear_bit; /* 0 = set the bit, else clear bit */ |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2253 | |
| 2254 | bam_dmux_log("%s: apps ack %d->%d\n", __func__, |
| 2255 | clear_bit & 0x1, ~clear_bit & 0x1); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2256 | smsm_change_state(SMSM_APPS_STATE, |
| 2257 | clear_bit & SMSM_A2_POWER_CONTROL_ACK, |
| 2258 | ~clear_bit & SMSM_A2_POWER_CONTROL_ACK); |
| 2259 | clear_bit = ~clear_bit; |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2260 | DBG_INC_ACK_OUT_CNT(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2261 | } |
| 2262 | |
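 |      | /*
 |      |  * SMSM callback for modem SMSM_A2_POWER_CONTROL changes: drop duplicate
 |      |  * notifications, then reconnect to, disconnect from, or (on the first
 |      |  * vote) initialize the BAM based on the new power-control state.
 |      |  */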
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2263 | static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state) |
| 2264 | { |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2265 | static int last_processed_state; |
| 2266 | |
| 2267 | mutex_lock(&smsm_cb_lock); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2268 | bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0; |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2269 | DBG_INC_A2_POWER_CONTROL_IN_CNT(); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2270 | bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state, |
| 2271 | new_state); |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2272 | if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) { |
| 2273 | bam_dmux_log("%s: already processed this state\n", __func__); |
| 2274 | mutex_unlock(&smsm_cb_lock); |
| 2275 | return; |
| 2276 | } |
| 2277 | |
| 2278 | last_processed_state = new_state & SMSM_A2_POWER_CONTROL; |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2279 | |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2280 | if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2281 | bam_dmux_log("%s: reconnect\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2282 | grab_wakelock(); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2283 | reconnect_to_bam(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2284 | } else if (bam_mux_initialized && |
| 2285 | !(new_state & SMSM_A2_POWER_CONTROL)) { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2286 | bam_dmux_log("%s: disconnect\n", __func__); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2287 | disconnect_to_bam(); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2288 | release_wakelock(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2289 | } else if (new_state & SMSM_A2_POWER_CONTROL) { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2290 | bam_dmux_log("%s: init\n", __func__); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2291 | grab_wakelock(); |
Jeff Hugo | a670b76 | 2012-03-15 15:58:28 -0600 | [diff] [blame] | 2292 | if (cpu_is_msm9615()) |
| 2293 | msm9615_bam_init(); |
| 2294 | else |
Eric Holmberg | 604ab25 | 2012-01-15 00:01:18 -0700 | [diff] [blame] | 2295 | bam_init(); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2296 | } else { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2297 | bam_dmux_log("%s: bad state change\n", __func__); |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2298 | pr_err("%s: unsupported state change\n", __func__); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2299 | } |
Jeff Hugo | 4b7c7b3 | 2012-04-18 16:25:14 -0600 | [diff] [blame] | 2300 | mutex_unlock(&smsm_cb_lock); |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2301 | |
| 2302 | } |
| 2303 | |
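 |      | /*
 |      |  * SMSM callback for SMSM_A2_POWER_CONTROL_ACK: the modem has acknowledged
 |      |  * the apps power-control vote, so complete ul_wakeup_ack_completion.
 |      |  */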
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2304 | static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state, |
| 2305 | uint32_t new_state) |
| 2306 | { |
Eric Holmberg | 1f1255d | 2012-02-22 13:37:21 -0700 | [diff] [blame] | 2307 | DBG_INC_ACK_IN_CNT(); |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2308 | bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state, |
| 2309 | new_state); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2310 | complete_all(&ul_wakeup_ack_completion); |
| 2311 | } |
| 2312 | |
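 |      | /*
 |      |  * Platform probe: acquire clocks, create the rx/tx workqueues, allocate
 |      |  * per-channel platform devices, register the SMSM callbacks, and process
 |      |  * an A2 power-control vote that may already be pending.
 |      |  */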
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2313 | static int bam_dmux_probe(struct platform_device *pdev) |
| 2314 | { |
| 2315 | int rc; |
| 2316 | |
| 2317 | DBG("%s probe called\n", __func__); |
| 2318 | if (bam_mux_initialized) |
| 2319 | return 0; |
| 2320 | |
Stephen Boyd | 69d35e3 | 2012-02-14 15:33:30 -0800 | [diff] [blame] | 2321 | xo_clk = clk_get(&pdev->dev, "xo"); |
| 2322 | if (IS_ERR(xo_clk)) { |
| 2323 | pr_err("%s: did not get xo clock\n", __func__); |
| 2324 | return PTR_ERR(xo_clk); |
| 2325 | } |
Stephen Boyd | 1c51a49 | 2011-10-26 12:11:47 -0700 | [diff] [blame] | 2326 | dfab_clk = clk_get(&pdev->dev, "bus_clk"); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2327 | if (IS_ERR(dfab_clk)) { |
| 2328 | pr_err("%s: did not get dfab clock\n", __func__); |
| 2329 | return -EFAULT; |
| 2330 | } |
| 2331 | |
| 2332 | rc = clk_set_rate(dfab_clk, 64000000); |
| 2333 | if (rc) |
| 2334 | pr_err("%s: unable to set dfab clock rate\n", __func__); |
| 2335 | |
Jeff Hugo | fff43af9 | 2012-03-29 17:54:52 -0600 | [diff] [blame] | 2336 | /* |
| 2337 | * setup the workqueue so that it can be pinned to core 0 and not |
| 2338 | * block the watchdog pet function, so that netif_rx() in rmnet |
| 2339 | * only uses one queue. |
| 2340 | */ |
| 2341 | bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx", |
| 2342 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2343 | if (!bam_mux_rx_workqueue) |
| 2344 | return -ENOMEM; |
| 2345 | |
| 2346 | bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx"); |
| 2347 | if (!bam_mux_tx_workqueue) { |
| 2348 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2349 | return -ENOMEM; |
| 2350 | } |
| 2351 | |
Jeff Hugo | 7960abd | 2011-08-02 15:39:38 -0600 | [diff] [blame] | 2352 | for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2353 | spin_lock_init(&bam_ch[rc].lock); |
Jeff Hugo | 7960abd | 2011-08-02 15:39:38 -0600 | [diff] [blame] | 2354 | scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN, |
| 2355 | "bam_dmux_ch_%d", rc); |
| 2356 | /* bus 2, ie a2 stream 2 */ |
| 2357 | bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2); |
| 2358 | if (!bam_ch[rc].pdev) { |
| 2359 | pr_err("%s: platform device alloc failed\n", __func__); |
| 2360 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2361 | destroy_workqueue(bam_mux_tx_workqueue); |
| 2362 | return -ENOMEM; |
| 2363 | } |
| 2364 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2365 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2366 | init_completion(&ul_wakeup_ack_completion); |
| 2367 | init_completion(&bam_connection_completion); |
Eric Holmberg | 006057d | 2012-01-11 10:10:42 -0700 | [diff] [blame] | 2368 | init_completion(&dfab_unvote_completion); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2369 | INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout); |
Jeff Hugo | ae3a85e | 2011-12-02 17:10:18 -0700 | [diff] [blame] | 2370 | wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock"); |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2371 | |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2372 | rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL, |
| 2373 | bam_dmux_smsm_cb, NULL); |
| 2374 | |
| 2375 | if (rc) { |
| 2376 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2377 | destroy_workqueue(bam_mux_tx_workqueue); |
| 2378 | pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc); |
| 2379 | return -ENOMEM; |
| 2380 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2381 | |
Jeff Hugo | aab7ebc | 2011-09-07 16:46:04 -0600 | [diff] [blame] | 2382 | rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK, |
| 2383 | bam_dmux_smsm_ack_cb, NULL); |
| 2384 | |
| 2385 | if (rc) { |
| 2386 | destroy_workqueue(bam_mux_rx_workqueue); |
| 2387 | destroy_workqueue(bam_mux_tx_workqueue); |
| 2388 | smsm_state_cb_deregister(SMSM_MODEM_STATE, |
| 2389 | SMSM_A2_POWER_CONTROL, |
| 2390 | bam_dmux_smsm_cb, NULL); |
| 2391 | pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__, |
| 2392 | rc); |
| 2393 | for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) |
| 2394 | platform_device_put(bam_ch[rc].pdev); |
| 2395 | return -ENOMEM; |
| 2396 | } |
| 2397 | |
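 |      | 	/* handle a power-control vote that arrived before callback registration */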
Eric Holmberg | fd1e2ae | 2011-11-15 18:28:17 -0700 | [diff] [blame] | 2398 | if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL) |
| 2399 | bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE)); |
| 2400 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2401 | return 0; |
| 2402 | } |
| 2403 | |
| 2404 | static struct platform_driver bam_dmux_driver = { |
| 2405 | .probe = bam_dmux_probe, |
| 2406 | .driver = { |
| 2407 | .name = "BAM_RMNT", |
| 2408 | .owner = THIS_MODULE, |
| 2409 | }, |
| 2410 | }; |
| 2411 | |
| 2412 | static int __init bam_dmux_init(void) |
| 2413 | { |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2414 | int ret; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2415 | #ifdef CONFIG_DEBUG_FS |
| 2416 | struct dentry *dent; |
| 2417 | |
| 2418 | dent = debugfs_create_dir("bam_dmux", 0); |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 2419 | if (!IS_ERR(dent)) { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2420 | debug_create("tbl", 0444, dent, debug_tbl); |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 2421 | debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt); |
| 2422 | debug_create("stats", 0444, dent, debug_stats); |
Eric Holmberg | e4ac80b | 2012-01-12 09:21:59 -0700 | [diff] [blame] | 2423 | debug_create_multiple("log", 0444, dent, debug_log); |
Eric Holmberg | 2fddbcd | 2011-11-28 18:25:57 -0700 | [diff] [blame] | 2424 | } |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2425 | #endif |
Eric Holmberg | 878923a | 2012-01-10 14:28:19 -0700 | [diff] [blame] | 2426 | ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL); |
| 2427 | if (ret) { |
| 2428 | pr_err("%s: failed to allocate log %d\n", __func__, ret); |
| 2429 | bam_dmux_state_logging_disabled = 1; |
| 2430 | } |
| 2431 | |
Anurag Singh | dcd8b4e | 2012-07-30 16:46:37 -0700 | [diff] [blame^] | 2432 | rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP; |
| 2433 | |
Jeff Hugo | 6e7a92a | 2011-10-24 05:25:13 -0600 | [diff] [blame] | 2434 | subsys_notif_register_notifier("modem", &restart_notifier); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2435 | return platform_driver_register(&bam_dmux_driver); |
| 2436 | } |
| 2437 | |
Jeff Hugo | ade1f84 | 2011-08-03 15:53:59 -0600 | [diff] [blame] | 2438 | late_initcall(bam_dmux_init); /* needs to init after SMD */ |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 2439 | MODULE_DESCRIPTION("MSM BAM DMUX"); |
| 2440 | MODULE_LICENSE("GPL v2"); |