/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA	0
#define BAM_MUX_HDR_CMD_OPEN	1
#define BAM_MUX_HDR_CMD_CLOSE	2

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
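
/*
 * Per-channel TX flow control: once a client opts in (use_wm is set by
 * the msm_bam_dmux_is_ch_* queries), msm_bam_dmux_write() returns
 * -EAGAIN while num_tx_pkts >= HIGH_WATERMARK; msm_bam_dmux_is_ch_low()
 * reports when the in-flight count has drained to LOW_WATERMARK.
 */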

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
		bam_dmux_tx_sps_failure_cnt++;	\
} while (0)

#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
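
/*
 * Wire format: each transfer is one bam_mux_hdr followed by pkt_len
 * payload bytes plus pad_len trailing bytes, padding the total to a
 * 4-byte multiple (see the padding logic in msm_bam_dmux_write()).
 */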

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static struct clk *dfab_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	if (in_global_reset)
		return;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return; /* need better way to handle this */

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_mutexlock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_mutexlock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
					   DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
			 BUFFER_SIZE, info,
			 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}
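
/*
 * RX buffer lifecycle: queue_rx() allocates and DMA-maps a BUFFER_SIZE
 * skb, parks it on bam_rx_pool, and submits the descriptor to the BAM
 * pipe. When the transfer completes (interrupt or polling), the oldest
 * pool entry is demuxed in handle_bam_mux_cmd(), which queues a
 * replacement buffer so the pipe does not run dry.
 */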

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	int ret;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
	    rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
	    rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: dropping invalid LCID %d reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		queue_rx();
		ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
		if (ret)
			pr_err("%s: platform_device_add() error: %d\n",
			       __func__, ret);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		dev_kfree_skb_any(rx_skb);
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
	}

	ul_packet_written = 1;
	return rc;
}

static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	unsigned long event_data;
	struct list_head *node;
	unsigned long flags;

	if (in_global_reset)
		return;
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	node = bam_tx_pool.next;
	list_del(node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	info = container_of(work, struct tx_pkt_info, work);
	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->data_len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy may be more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}
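
/*
 * Illustrative client sketch (hypothetical: my_notify(), my_rx(), and
 * channel 0 are examples, not part of this driver). For RECEIVE and
 * WRITE_DONE events, data carries the sk_buff:
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		switch (event) {
 *		case BAM_DMUX_RECEIVE:
 *			my_rx((struct sk_buff *)data);
 *			break;
 *		case BAM_DMUX_WRITE_DONE:
 *			dev_kfree_skb_any((struct sk_buff *)data);
 *			break;
 *		}
 *	}
 *
 *	rc = msm_bam_dmux_open(0, NULL, my_notify);
 *	if (!rc)
 *		rc = msm_bam_dmux_write(0, skb);
 *
 * msm_bam_dmux_write() may return -EAGAIN once the high watermark
 * trips; see the watermark queries above.
 */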

int msm_bam_dmux_open(uint32_t id, void *priv,
		      void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}

	return ret;
}

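/*
 * RX polling loop: once bam_mux_rx_notify() sees an EOT it masks the
 * pipe interrupt and queues this work, which drains descriptors with
 * sps_get_iovec() roughly every millisecond. After POLLING_INACTIVITY
 * empty cycles it re-arms the interrupt and exits, rechecking the pipe
 * once more to close the race with a packet that landed in between.
 */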
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	struct sps_connect cur_rx_conn;

	while (1) { /* timer loop */
		++inactive_cycles;
		while (1) { /* deplete queue loop */
			if (in_global_reset)
				return;
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			info = container_of(node, struct rx_pkt_info,
					    list_node);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			/*
			 * attempt to enable interrupts in this pipe;
			 * if enabling interrupts fails, continue polling
			 */
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
				       " not enabled\n", __func__);
				queue_work(bam_mux_rx_workqueue,
					   &rx_timer_work);
				return;
			} else {
				rx_register_event.options = SPS_O_EOT;
				/* should check return value */
				sps_register_event(bam_rx_pipe,
						   &rx_register_event);
				cur_rx_conn.options = SPS_O_AUTO_ENABLE |
					SPS_O_EOT | SPS_O_ACK_TRANSFERS;
				ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
				if (ret) {
					pr_err("%s: sps_set_config() failed, "
					       "interrupts not enabled\n",
					       __func__);
					queue_work(bam_mux_rx_workqueue,
						   &rx_timer_work);
					return;
				}
				polling_mode = 0;
			}
			if (in_global_reset)
				return;
			/* handle race condition - missed packet? */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				return;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			info = container_of(node, struct rx_pkt_info,
					    list_node);
			handle_bam_mux_cmd(&info->work);
			return;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->skb->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->len,
					 DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
				       " not disabled\n", __func__);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed, interrupts"
				       " not disabled\n", __func__);
				break;
			}
			polling_mode = 1;
			queue_work(bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, bam_ch_is_local_open(j) ? "Y" : "N",
			       bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
	struct list_head *p;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	__list_for_each(p, &bam_tx_pool) {
		++n;
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
		       "skb copy cnt: %u\n"
		       "skb copy bytes: %u\n"
		       "sps tx failures: %u\n",
		       bam_dmux_write_cpy_cnt,
		       bam_dmux_write_cpy_bytes,
		       bam_dmux_tx_sps_failure_cnt
		       );

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

static void notify_all(int event, unsigned long data)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		if (bam_ch_is_open(i))
			bam_ch[i].notify(bam_ch[i].priv, event, data);
	}
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		ul_packet_written = 1;
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}
	read_unlock(&ul_wakeup_lock);
}

void msm_bam_dmux_kickoff_ul_wakeup(void)
{
	queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
}

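/*
 * Uplink power-collapse locking: senders hold ul_wakeup_lock for read
 * while the link is up; ul_timeout() must take it for write before it
 * votes the A2 link down, so an in-flight write blocks the power-down
 * and a power-down blocks new writes until ul_wakeup() completes.
 */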
static void ul_timeout(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	if (in_global_reset)
		return;
	ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
	if (!ret) { /* failed to grab lock, reschedule and bail */
		schedule_delayed_work(&ul_timeout_work,
				      msecs_to_jiffies(UL_TIMEOUT_DELAY));
		return;
	}
	if (ul_packet_written) {
		ul_packet_written = 0;
		schedule_delayed_work(&ul_timeout_work,
				      msecs_to_jiffies(UL_TIMEOUT_DELAY));
	} else {
		wait_for_ack = 1;
		INIT_COMPLETION(ul_wakeup_ack_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
		bam_is_connected = 0;
		notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
}

static void ul_wakeup(void)
{
	int ret;

	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		mutex_unlock(&wakeup_lock);
		return;
	}
	/*
	 * must wait for the previous power down request to have been acked;
	 * chances are it already came in and this will just fall through
	 * instead of waiting
	 */
	if (wait_for_ack) {
		ret = wait_for_completion_interruptible_timeout(
			&ul_wakeup_ack_completion, HZ);
		BUG_ON(ret == 0);
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	ret = wait_for_completion_interruptible_timeout(
		&ul_wakeup_ack_completion, HZ);
	BUG_ON(ret == 0);
	ret = wait_for_completion_interruptible_timeout(
		&bam_connection_completion, HZ);
	BUG_ON(ret == 0);

	bam_is_connected = 1;
	schedule_delayed_work(&ul_timeout_work,
			      msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}
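
/*
 * A2 wakeup handshake as implemented above: the apps side asserts
 * SMSM_A2_POWER_CONTROL in its own state; the modem acks with
 * SMSM_A2_POWER_CONTROL_ACK (completing ul_wakeup_ack_completion via
 * bam_dmux_smsm_ack_cb()) and raises its own SMSM_A2_POWER_CONTROL,
 * which drives bam_dmux_smsm_cb() to reconnect_to_bam() and complete
 * bam_connection_completion.
 */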

static void reconnect_to_bam(void)
{
	int i;

	in_global_reset = 0;
	vote_dfab();
	i = sps_device_reset(a2_device_handle);
	if (i)
		pr_err("%s: device reset failed rc = %d\n", __func__, i);
	i = sps_connect(bam_tx_pipe, &tx_connection);
	if (i)
		pr_err("%s: tx connection failed rc = %d\n", __func__, i);
	i = sps_connect(bam_rx_pipe, &rx_connection);
	if (i)
		pr_err("%s: rx connection failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (i)
		pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (i)
		pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
}

static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;

	bam_connection_is_active = 0;
	INIT_COMPLETION(bam_connection_completion);
	sps_disconnect(bam_tx_pipe);
	sps_disconnect(bam_rx_pipe);
	unvote_dfab();
	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
}

static void vote_dfab(void)
{
}

static void unvote_dfab(void)
{
}

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	int i;
	struct list_head *node;
	struct tx_pkt_info *info;
	int temp_remote_status;
	unsigned long flags;

	if (code != SUBSYS_AFTER_SHUTDOWN)
		return NOTIFY_DONE;

	in_global_reset = 1;
	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		temp_remote_status = bam_ch_is_remote_open(i);
		bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
		bam_ch[i].num_tx_pkts = 0;
		if (bam_ch_is_local_open(i))
			bam_ch[i].status |= BAM_CH_IN_RESET;
		if (temp_remote_status) {
			platform_device_unregister(bam_ch[i].pdev);
			bam_ch[i].pdev = platform_device_alloc(
						bam_ch[i].name, 2);
		}
	}
	/* cleanup UL */
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	while (!list_empty(&bam_tx_pool)) {
		node = bam_tx_pool.next;
		list_del(node);
		info = container_of(node, struct tx_pkt_info,
				    list_node);
		if (!info->is_cmd) {
			dma_unmap_single(NULL, info->dma_address,
					 info->skb->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(info->skb);
		} else {
			dma_unmap_single(NULL, info->dma_address,
					 info->len,
					 DMA_TO_DEVICE);
			kfree(info->skb);
		}
		kfree(info);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);

	return NOTIFY_DONE;
}

static void bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;

	vote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_connect_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
	return;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
			  rx_desc_mem_buf.phys_base);
rx_mem_failed:
	sps_disconnect(bam_tx_pipe);
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
			  tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_mem_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	/* destroy_workqueue(bam_mux_workqueue); */
	/* return ret; */
	return;
}

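/*
 * toggle_apps_ack() alternates SMSM_A2_POWER_CONTROL_ACK on each call
 * (tracked by the static clear_bit), so every BAM (re)connect produces
 * a fresh SMSM state change for the modem to observe.
 */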
static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
	smsm_change_state(SMSM_APPS_STATE,
			  clear_bit & SMSM_A2_POWER_CONTROL_ACK,
			  ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	clear_bit = ~clear_bit;
}

static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	DBG("%s: smsm activity\n", __func__);
	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
		wake_lock(&bam_wakelock);
		reconnect_to_bam();
	} else if (bam_mux_initialized &&
		   !(new_state & SMSM_A2_POWER_CONTROL)) {
		disconnect_to_bam();
		wake_unlock(&bam_wakelock);
	} else if (new_state & SMSM_A2_POWER_CONTROL) {
		wake_lock(&bam_wakelock);
		bam_init();
	} else {
		pr_err("%s: unsupported state change\n", __func__);
	}
}

static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
				 uint32_t new_state)
{
	complete_all(&ul_wakeup_ack_completion);
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	dfab_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(dfab_clk)) {
		pr_err("%s: did not get dfab clock\n", __func__);
		return -EFAULT;
	}

	rc = clk_set_rate(dfab_clk, 64000000);
	if (rc)
		pr_err("%s: unable to set dfab clock rate\n", __func__);

	bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
			  "bam_dmux_ch_%d", rc);
		/* bus 2, ie a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	init_completion(&ul_wakeup_ack_completion);
	init_completion(&bam_connection_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
				    bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
				    bam_dmux_smsm_ack_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		smsm_state_cb_deregister(SMSM_MODEM_STATE,
					 SMSM_A2_POWER_CONTROL,
					 bam_dmux_smsm_cb, NULL);
		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
		       rc);
		for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
			platform_device_put(bam_ch[rc].pdev);
		return -ENOMEM;
	}

	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent)) {
		debug_create("tbl", 0444, dent, debug_tbl);
		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
		debug_create("stats", 0444, dent, debug_stats);
	}
#endif
	subsys_notif_register_notifier("modem", &restart_notifier);
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");