blob: 6c5b380d27f137099f7d125cd5ea608cb0f57e36 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <mach/sps.h>
26
/* Per spec. max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

/* User-defined (destination/source-referred-user) message codes */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
/* Runtime-PM autosuspend delay (one second) */
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM		32

/* Assemble the first word of a SLIMbus message header */
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
#define SLIM_ROOT_FREQ	24576000

/* Depth of the local RX rings (controller and satellite) */
#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
/* Default PGD port configuration fields (written to PGD_PORT_CFGn) */
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

/* Satellite protocol constants */
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCSS	0x20

/* Qualcomm enumeration-address fields used to recognize own devices */
#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
#define QC_MSM_DEVS	5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086
/* Component registers */
enum comp_reg {
	COMP_CFG	= 0,
	COMP_TRUST_CFG	= 0x14,
};

/* Manager registers */
enum mgr_reg {
	MGR_CFG		= 0x200,
	MGR_STATUS	= 0x204,
	MGR_RX_MSGQ_CFG	= 0x208,
	MGR_INT_EN	= 0x210,
	MGR_INT_STAT	= 0x214,
	MGR_INT_CLR	= 0x218,
	MGR_TX_MSG	= 0x230,
	MGR_RX_MSG	= 0x270,
	MGR_VE_STAT	= 0x300,
};

/* Manager config (MGR_CFG) bit fields */
enum msg_cfg {
	MGR_CFG_ENABLE		= 1,
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,
	MSGQ_TX_LOW	= 1,
	MSGQ_TX_HIGH	= 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG		= 0x400,
	FRM_STAT	= 0x404,
	FRM_INT_EN	= 0x410,
	FRM_INT_STAT	= 0x414,
	FRM_INT_CLR	= 0x418,
	FRM_WAKEUP	= 0x41C,
	FRM_CLKCTL_DONE	= 0x420,
	FRM_IE_STAT	= 0x430,
	FRM_VE_STAT	= 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG	= 0x600,
	INTF_STAT	= 0x604,
	INTF_INT_EN	= 0x610,
	INTF_INT_STAT	= 0x614,
	INTF_INT_CLR	= 0x618,
	INTF_IE_STAT	= 0x630,
	INTF_VE_STAT	= 0x640,
};

/*
 * Manager PGD registers.
 * The "n"-suffixed entries are banked registers: EEn registers are
 * offset by (16 * ee) and per-port registers by (32 * port) at the
 * points of use in this driver.
 */
enum pgd_reg {
	PGD_CFG			= 0x1000,
	PGD_STAT		= 0x1004,
	PGD_INT_EN		= 0x1010,
	PGD_INT_STAT		= 0x1014,
	PGD_INT_CLR		= 0x1018,
	PGD_OWN_EEn		= 0x1020,
	PGD_PORT_INT_EN_EEn	= 0x1030,
	PGD_PORT_INT_ST_EEn	= 0x1034,
	PGD_PORT_INT_CL_EEn	= 0x1038,
	PGD_PORT_CFGn		= 0x1080,
	PGD_PORT_STATn		= 0x1084,
	PGD_PORT_PARAMn		= 0x1088,
	PGD_PORT_BLKn		= 0x108C,
	PGD_PORT_TRANn		= 0x1090,
	PGD_PORT_MCHANn		= 0x1094,
	PGD_PORT_PSHPLLn	= 0x1098,
	PGD_PORT_PC_CFGn	= 0x1600,
	PGD_PORT_PC_VALn	= 0x1604,
	PGD_PORT_PC_VFR_TSn	= 0x1608,
	PGD_PORT_PC_VFR_STn	= 0x160C,
	PGD_PORT_PC_VFR_CLn	= 0x1610,
	PGD_IE_STAT		= 0x1700,
	PGD_VE_STAT		= 0x1710,
};

/* Resource-group ownership encodings */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};

/* Manager interrupt status/clear bits (MGR_INT_STAT / MGR_INT_CLR) */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};

/* Framer config (FRM_CFG) field positions */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};

/*
 * Controller power state:
 *  AWAKE    - bus active
 *  SLEEPING - system suspend in progress (non-clock-pause TX rejected)
 *  ASLEEP   - clock paused / runtime suspended (all TX rejected)
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
195
/* BAM (data mover) peripheral backing the SLIMbus ports */
struct msm_slim_sps_bam {
	u32			hdl;	/* BAM handle, used as pipe source/destination */
	void __iomem		*base;	/* mapped BAM register space */
	int			irq;
};

/* One SPS pipe/endpoint used for a SLIMbus port or message queue */
struct msm_slim_endp {
	struct sps_pipe			*sps;	/* allocated via sps_alloc_endpoint() */
	struct sps_connect		config;	/* filled by sps_get_config/sps_connect */
	struct sps_register_event	event;
	struct sps_mem_buffer		buf;
	struct completion		*xcomp;
	bool				connected;	/* true after successful sps_connect */
};

/* Per-controller driver state */
struct msm_slim_ctrl {
	struct slim_controller  ctrl;		/* core slimbus controller */
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;		/* mapped manager register space */
	struct resource		*slew_mem;
	u32			curr_bw;
	u8			msg_cnt;
	u32			tx_buf[10];	/* single outstanding TX message buffer */
	/* RX ring: rx_msgs[head..tail), guarded by rx_lock */
	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t		rx_lock;
	int			head;		/* RX ring read index */
	int			tail;		/* RX ring write index */
	int			irq;
	int			err;		/* last TX error, set by ISR (-EIO on NACK) */
	int			ee;		/* execution environment index for EEn regs */
	struct completion	*wr_comp;	/* completed by ISR when TX sent/NACKed */
	struct msm_slim_sat	*satd;
	struct msm_slim_endp	pipes[7];	/* port pipes; see msm_slim_port_xfer */
	struct msm_slim_sps_bam	bam;
	struct msm_slim_endp	rx_msgq;
	struct completion	rx_msgq_notify;
	struct task_struct	*rx_msgq_thread;
	struct clk		*rclk;
	struct mutex		tx_lock;	/* serializes message transmission */
	u8			pgdla;		/* logical address of ported generic device */
	bool			use_rx_msgqs;
	int			pipe_b;		/* HW port number of first pipe in pipes[] */
	struct completion	reconf;		/* completed on RECONFIG_DONE interrupt */
	bool			reconf_busy;
	bool			chan_active;	/* holds a runtime-PM vote for data channels */
	enum msm_ctrl_state	state;
};

/* Satellite device state (messages handled on a workqueue) */
struct msm_slim_sat {
	struct slim_device	satcl;
	struct msm_slim_ctrl	*dev;
	struct workqueue_struct *wq;
	struct work_struct	wd;
	/* satellite RX ring: sat_msgs[shead..stail), guarded by lock */
	u8			sat_msgs[SAT_CONCUR_MSG][40];
	u16			*satch;		/* channel handles indexed by satellite chan # */
	u8			nsatch;
	bool			sent_capability;
	bool			pending_reconf;
	bool			pending_capability;
	int			shead;
	int			stail;
	spinlock_t		lock;
};
260
261static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
262{
263 spin_lock(&dev->rx_lock);
264 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
265 spin_unlock(&dev->rx_lock);
266 dev_err(dev->dev, "RX QUEUE full!");
267 return -EXFULL;
268 }
269 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
270 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
271 spin_unlock(&dev->rx_lock);
272 return 0;
273}
274
275static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
276{
277 unsigned long flags;
278 spin_lock_irqsave(&dev->rx_lock, flags);
279 if (dev->tail == dev->head) {
280 spin_unlock_irqrestore(&dev->rx_lock, flags);
281 return -ENODATA;
282 }
283 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
284 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
285 spin_unlock_irqrestore(&dev->rx_lock, flags);
286 return 0;
287}
288
289static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
290{
291 struct msm_slim_ctrl *dev = sat->dev;
292 spin_lock(&sat->lock);
293 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
294 spin_unlock(&sat->lock);
295 dev_err(dev->dev, "SAT QUEUE full!");
296 return -EXFULL;
297 }
298 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
299 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
300 spin_unlock(&sat->lock);
301 return 0;
302}
303
304static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
305{
306 unsigned long flags;
307 spin_lock_irqsave(&sat->lock, flags);
308 if (sat->stail == sat->shead) {
309 spin_unlock_irqrestore(&sat->lock, flags);
310 return -ENODATA;
311 }
312 memcpy(buf, sat->sat_msgs[sat->shead], 40);
313 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
314 spin_unlock_irqrestore(&sat->lock, flags);
315 return 0;
316}
317
318static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
319{
320 e_addr[0] = (buffer[1] >> 24) & 0xff;
321 e_addr[1] = (buffer[1] >> 16) & 0xff;
322 e_addr[2] = (buffer[1] >> 8) & 0xff;
323 e_addr[3] = buffer[1] & 0xff;
324 e_addr[4] = (buffer[0] >> 24) & 0xff;
325 e_addr[5] = (buffer[0] >> 16) & 0xff;
326}
327
328static bool msm_is_sat_dev(u8 *e_addr)
329{
330 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
331 e_addr[2] != QC_CHIPID_SL &&
332 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
333 return true;
334 return false;
335}
336
/*
 * Take a runtime-PM vote on the controller device.
 * Returns the pm_runtime_get_sync() result (>= 0 on success), or
 * -ENODEV if the usage count looks inconsistent or runtime PM is
 * compiled out. Callers only call msm_slim_put_ctrl() when this
 * returned >= 0 (see msm_xfer_msg).
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		/* Sanity-check the usage count taken by get_sync above */
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			/*
			 * NOTE(review): the vote taken by get_sync is not
			 * dropped on this error path — presumably deliberate
			 * given the count is already inconsistent; confirm.
			 */
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl().
 * Marks the device busy first so autosuspend timing restarts, and
 * refuses to drop below a zero usage count (logs instead).
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
366
/*
 * Main SLIMbus manager ISR.
 * Handles, in order:
 *  - TX completion / NACK: records -EIO on NACK and completes wr_comp.
 *  - RX message: reads the message from MGR_RX_MSG and routes it by
 *    message type/code to the satellite workqueue, the controller RX
 *    ring, or just logs it. Each branch clears MGR_INT_RX_MSG_RCVD and
 *    issues mb() before waking consumers — do not reorder.
 *  - Reconfiguration-done: completes dev->reconf.
 *  - Per-port PGD status: records overflow/underflow/disconnect errors
 *    and clears the port interrupt bits.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word carries length (5 bits), MT and MC */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User messages are processed by the satellite work */
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Replies are consumed by the rxwq thread */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log the reported information element, then drop it */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupt status for this execution environment */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
					(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
532
533static int
534msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
535{
536 int ret;
537 struct sps_pipe *endpoint;
538 struct sps_connect *config = &ep->config;
539
540 /* Allocate the endpoint */
541 endpoint = sps_alloc_endpoint();
542 if (!endpoint) {
543 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
544 return -ENOMEM;
545 }
546
547 /* Get default connection configuration for an endpoint */
548 ret = sps_get_config(endpoint, config);
549 if (ret) {
550 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
551 goto sps_config_failed;
552 }
553
554 ep->sps = endpoint;
555 return 0;
556
557sps_config_failed:
558 sps_free_endpoint(endpoint);
559 return ret;
560}
561
/* Release the SPS endpoint held by @ep and clear the stale pointer. */
static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
568
569static int msm_slim_sps_mem_alloc(
570 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
571{
572 dma_addr_t phys;
573
574 mem->size = len;
575 mem->min_size = 0;
576 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
577
578 if (!mem->base) {
579 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
580 return -ENOMEM;
581 }
582
583 mem->phys_base = phys;
584 memset(mem->base, 0x00, mem->size);
585 return 0;
586}
587
588static void
589msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
590{
591 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
592 mem->size = 0;
593 mem->base = NULL;
594 mem->phys_base = 0;
595}
596
597static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
598{
599 u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
600 u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
601 (dev->ee * 16));
602 writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
603 writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
604 writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
605 writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
606 (dev->ee * 16));
607 /* Make sure that port registers are updated before returning */
608 mb();
609}
610
/*
 * Connect the SPS pipe backing logical port @pn to the BAM, in the
 * direction implied by the port's flow (SLIM_SRC: memory -> BAM,
 * otherwise BAM -> memory). The BAM pipe index is read back from the
 * hardware PGD_PORT_STATn register. On success the hardware port is
 * configured and enabled via msm_hw_set_port().
 * NOTE: @pn indexes dev->pipes[]/ctrl.ports[]; the hardware port
 * number is pn + dev->pipe_b.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe: just refresh its options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* BAM pipe index for this port is reported in bits [11:4] */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
662
663static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
664{
665 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
666 /*
667 * Currently we block a transaction until the current one completes.
668 * In case we need multiple transactions, use message Q
669 */
670 return dev->tx_buf;
671}
672
673static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
674{
675 int i;
676 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
677 for (i = 0; i < (len + 3) >> 2; i++) {
678 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
679 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
680 }
681 /* Guarantee that message is sent before returning */
682 mb();
683 return 0;
684}
685
/*
 * Transmit one SLIMbus transaction and wait (up to 1s) for the ISR to
 * confirm it was sent or NACKed.
 *
 * Runtime-PM voting: a "messaging" vote (msgv) is taken for every
 * non-clock-pause message and dropped on every exit path where
 * msgv >= 0. A separate "data channel" vote is taken at
 * BEGIN_RECONFIGURATION when channels are scheduled (chan_active) and
 * dropped after RECONFIGURE_NOW when no slots remain in use.
 *
 * Returns 0 on success, dev->err from the ISR (-EIO on NACK),
 * -EBUSY when suspended, -EPROTONOSUPPORT for enumeration-address
 * destinations, or -ETIMEDOUT.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 iff a messaging PM vote was taken */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Only clock-pause messages may go out while SLEEPING */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* Port connect/disconnect with la 0xFF targets the PGD device */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Value/information-element messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate logical port number to hardware port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* Clock-pause variant: also wait for reconfig-done, then
		 * gate the clock and disable the IRQ. */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* Drop the data-channel vote when no slots remain */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
842
843static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
844 u8 elen, u8 laddr)
845{
846 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
847 DECLARE_COMPLETION_ONSTACK(done);
848 int timeout;
849 u32 *buf;
850 mutex_lock(&dev->tx_lock);
851 buf = msm_get_msg_buf(ctrl, 9);
852 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
853 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
854 SLIM_MSG_DEST_LOGICALADDR,
855 ea[5] | ea[4] << 8);
856 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
857 buf[2] = laddr;
858
859 dev->wr_comp = &done;
860 msm_send_msg_buf(ctrl, buf, 9);
861 timeout = wait_for_completion_timeout(&done, HZ);
862 mutex_unlock(&dev->tx_lock);
863 return timeout ? dev->err : -ETIMEDOUT;
864}
865
/*
 * Bring the bus out of clock pause: re-enable the IRQ and root clock
 * (reversing the disable order used in msm_xfer_msg's clock-pause
 * path), then toggle the framer wakeup register.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
886
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887static int msm_config_port(struct slim_controller *ctrl, u8 pn)
888{
889 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
890 struct msm_slim_endp *endpoint;
891 int ret = 0;
892 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
893 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
894 return -EPROTONOSUPPORT;
895 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
896 return -ENODEV;
897
898 endpoint = &dev->pipes[pn];
899 ret = msm_slim_init_endpoint(dev, endpoint);
900 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
901 return ret;
902}
903
904static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
905 u8 pn, u8 **done_buf, u32 *done_len)
906{
907 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
908 struct sps_iovec sio;
909 int ret;
910 if (done_len)
911 *done_len = 0;
912 if (done_buf)
913 *done_buf = NULL;
914 if (!dev->pipes[pn].connected)
915 return SLIM_P_DISCONNECT;
916 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
917 if (!ret) {
918 if (done_len)
919 *done_len = sio.size;
920 if (done_buf)
921 *done_buf = (u8 *)sio.addr;
922 }
923 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
924 return SLIM_P_INPROGRESS;
925}
926
927static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
928 u32 len, struct completion *comp)
929{
930 struct sps_register_event sreg;
931 int ret;
932 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600933 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700934 return -ENODEV;
935
936
937 ctrl->ports[pn].xcomp = comp;
938 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
939 sreg.mode = SPS_TRIGGER_WAIT;
940 sreg.xfer_done = comp;
941 sreg.callback = NULL;
942 sreg.user = &ctrl->ports[pn];
943 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
944 if (ret) {
945 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
946 return ret;
947 }
948 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
949 SPS_IOVEC_FLAG_INT);
950 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
951
952 return ret;
953}
954
/*
 * Handle a satellite channel request parsed from a user message.
 * SLIM_USR_MC_CHAN_CTRL: apply the channel-control operation encoded
 * in buf[3] bits [7:6] to the channel handle for satellite channel
 * buf[5]. Otherwise (DEFINE_CHAN / DEF_ACT_CHAN): decode channel
 * properties from buf[3..6], define the channel(s) listed in
 * buf[8..len), and for DEF_ACT_CHAN also activate them.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* Map satellite channel numbers to local channel handles */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): 0xC0 mask with >> 5 yields only even values
		 * (0/2/4/6) for auxf — a >> 6 would give 0..3. Looks
		 * suspicious; confirm against the satellite message format
		 * before changing.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		/* rate multiplier = (coeff ? 3 : 1) * 2^exp */
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i > 9 after the loop above means more than one channel */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1004
1005static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
1006{
1007 u8 buf[40];
1008 u8 mc, mt, len;
1009 int i, ret;
1010 if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
1011 len = buf[0] & 0x1F;
1012 mt = (buf[0] >> 5) & 0x7;
1013 mc = buf[1];
1014 if (mt == SLIM_MSG_MT_CORE &&
1015 mc == SLIM_MSG_MC_REPORT_PRESENT) {
1016 u8 laddr;
1017 u8 e_addr[6];
1018 for (i = 0; i < 6; i++)
1019 e_addr[i] = buf[7-i];
1020
1021 ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
1022 /* Is this Qualcomm ported generic device? */
1023 if (!ret && e_addr[5] == QC_MFGID_LSB &&
1024 e_addr[4] == QC_MFGID_MSB &&
1025 e_addr[1] == QC_DEVID_PGD &&
1026 e_addr[2] != QC_CHIPID_SL)
1027 dev->pgdla = laddr;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001028 if (!ret && !pm_runtime_enabled(dev->dev) &&
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001029 laddr == (QC_MSM_DEVS - 1))
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001030 pm_runtime_enable(dev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001031
1032 } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
1033 mc == SLIM_MSG_MC_REPLY_VALUE) {
1034 u8 tid = buf[3];
1035 dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
1036 slim_msg_response(&dev->ctrl, &buf[4], tid,
1037 len - 4);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001038 pm_runtime_mark_last_busy(dev->dev);
Sagar Dharia144e5e02011-08-08 17:30:11 -06001039 } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
1040 u8 l_addr = buf[2];
1041 u16 ele = (u16)buf[4] << 4;
1042 ele |= ((buf[3] & 0xf0) >> 4);
1043 dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
1044 l_addr, ele);
1045 for (i = 0; i < len - 5; i++)
1046 dev_err(dev->dev, "offset:0x%x:bit mask:%x",
1047 i, buf[i+5]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001048 } else {
1049 dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
1050 mc, mt);
1051 for (i = 0; i < len; i++)
1052 dev_err(dev->dev, "error msg: %x", buf[i]);
1053
1054 }
1055 } else
1056 dev_err(dev->dev, "rxwq called and no dequeue");
1057}
1058
/*
 * Workqueue handler for satellite-bound messages.  Drains the satellite
 * message queue, performs the requested operation (enumeration, channel
 * define/control, bandwidth request, port connect/disconnect, reconfigure)
 * and, where the protocol requires it, sends a GENERIC_ACK back.
 *
 * Runtime-PM votes: msm_slim_get_ctrl() is taken while a message is being
 * processed and msm_slim_put_ctrl() released at the end (or deferred until
 * the satellite acks the capability message / finishes a reconfiguration
 * sequence via pending_capability / pending_reconf).
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives MSB-first; reverse it */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			/* hold a vote until the satellite acks capability */
			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* vote for the duration of this message only */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* look up the logical address for the queried e-addr */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* transaction id sits at a different offset for
			 * channel-control messages */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* keep an extra vote until RECONFIG_NOW completes */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* drop the vote taken when the define/control began */
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
				((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;	/* port number */
			wbuf[1] = buf[5];		/* channel */
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;	/* port number */
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* falls through to default, which only breaks */
		default:
			break;
		}
		/* no ack required: release this iteration's vote and loop */
		if (!gen_ack) {
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* send GENERIC_ACK carrying the tid and success/failure */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1245
1246static void
1247msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1248{
1249 u32 *buf = ev->data.transfer.user;
1250 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1251
1252 /*
1253 * Note the virtual address needs to be offset by the same index
1254 * as the physical address or just pass in the actual virtual address
1255 * if the sps_mem_buffer is not needed. Note that if completion is
1256 * used, the virtual address won't be available and will need to be
1257 * calculated based on the offset of the physical address
1258 */
1259 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1260
1261 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1262
1263 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1264 iovec->addr, iovec->size, iovec->flags);
1265
1266 } else {
1267 dev_err(dev->dev, "%s: unknown event %d\n",
1268 __func__, ev->event_id);
1269 }
1270}
1271
1272static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1273{
1274 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1275 msm_slim_rx_msgq_event(dev, notify);
1276}
1277
1278/* Queue up Rx message buffer */
1279static inline int
1280msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1281{
1282 int ret;
1283 u32 flags = SPS_IOVEC_FLAG_INT;
1284 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1285 struct sps_mem_buffer *mem = &endpoint->buf;
1286 struct sps_pipe *pipe = endpoint->sps;
1287
1288 /* Rx message queue buffers are 4 bytes in length */
1289 u8 *virt_addr = mem->base + (4 * ix);
1290 u32 phys_addr = mem->phys_base + (4 * ix);
1291
1292 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1293
1294 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1295 if (ret)
1296 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1297
1298 return ret;
1299}
1300
1301static inline int
1302msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1303{
1304 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1305 struct sps_mem_buffer *mem = &endpoint->buf;
1306 struct sps_pipe *pipe = endpoint->sps;
1307 struct sps_iovec iovec;
1308 int index;
1309 int ret;
1310
1311 ret = sps_get_iovec(pipe, &iovec);
1312 if (ret) {
1313 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1314 goto err_exit;
1315 }
1316
1317 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1318 iovec.addr, iovec.size, iovec.flags);
1319 BUG_ON(iovec.addr < mem->phys_base);
1320 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1321
1322 /* Calculate buffer index */
1323 index = (iovec.addr - mem->phys_base) / 4;
1324 *(data + offset) = *((u32 *)mem->base + index);
1325
1326 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1327
1328 /* Add buffer back to the queue */
1329 (void)msm_slim_post_rx_msgq(dev, index);
1330
1331err_exit:
1332 return ret;
1333}
1334
/*
 * Kernel thread that assembles RX messages from the 4-byte message-queue
 * words and routes complete messages either to the satellite workqueue or
 * to the generic RX path.  When message queues are disabled, each wakeup
 * simply drains the interrupt-fed RX FIFO via msm_slim_rxwq().
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];		/* words of the message being assembled */
	int index = 0;		/* next word slot within the message */
	u8 msg_len = 0;		/* total message length in bytes */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		/* pull the next 4-byte word into buffer[index] */
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* first word carries length, message type and code */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			/*
			 * Message complete: dispatch it.
			 * NOTE(review): a message wholly contained in the
			 * first word (msg_len <= 4) would never reach this
			 * branch — presumably the protocol guarantees longer
			 * messages; confirm.
			 */
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1403
1404static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1405{
1406 int i, ret;
1407 u32 pipe_offset;
1408 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1409 struct sps_connect *config = &endpoint->config;
1410 struct sps_mem_buffer *descr = &config->desc;
1411 struct sps_mem_buffer *mem = &endpoint->buf;
1412 struct completion *notify = &dev->rx_msgq_notify;
1413
1414 struct sps_register_event sps_error_event; /* SPS_ERROR */
1415 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1416
1417 /* Allocate the endpoint */
1418 ret = msm_slim_init_endpoint(dev, endpoint);
1419 if (ret) {
1420 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1421 goto sps_init_endpoint_failed;
1422 }
1423
1424 /* Get the pipe indices for the message queues */
1425 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1426 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1427
1428 config->mode = SPS_MODE_SRC;
1429 config->source = dev->bam.hdl;
1430 config->destination = SPS_DEV_HANDLE_MEM;
1431 config->src_pipe_index = pipe_offset;
1432 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1433 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1434
1435 /* Allocate memory for the FIFO descriptors */
1436 ret = msm_slim_sps_mem_alloc(dev, descr,
1437 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1438 if (ret) {
1439 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1440 goto alloc_descr_failed;
1441 }
1442
1443 ret = sps_connect(endpoint->sps, config);
1444 if (ret) {
1445 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1446 goto sps_connect_failed;
1447 }
1448
1449 /* Register completion for DESC_DONE */
1450 init_completion(notify);
1451 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1452
1453 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1454 sps_descr_event.options = SPS_O_DESC_DONE;
1455 sps_descr_event.user = (void *)dev;
1456 sps_descr_event.xfer_done = notify;
1457
1458 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1459 if (ret) {
1460 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1461 goto sps_reg_event_failed;
1462 }
1463
1464 /* Register callback for errors */
1465 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1466 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1467 sps_error_event.options = SPS_O_ERROR;
1468 sps_error_event.user = (void *)dev;
1469 sps_error_event.callback = msm_slim_rx_msgq_cb;
1470
1471 ret = sps_register_event(endpoint->sps, &sps_error_event);
1472 if (ret) {
1473 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1474 goto sps_reg_event_failed;
1475 }
1476
1477 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1478 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1479 if (ret) {
1480 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1481 goto alloc_buffer_failed;
1482 }
1483
1484 /*
1485 * Call transfer_one for each 4-byte buffer
1486 * Use (buf->size/4) - 1 for the number of buffer to post
1487 */
1488
1489 /* Setup the transfer */
1490 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1491 ret = msm_slim_post_rx_msgq(dev, i);
1492 if (ret) {
1493 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1494 goto sps_transfer_failed;
1495 }
1496 }
1497
1498 /* Fire up the Rx message queue thread */
1499 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1500 MSM_SLIM_NAME "_rx_msgq_thread");
1501 if (!dev->rx_msgq_thread) {
1502 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1503 ret = -EIO;
1504 } else
1505 return 0;
1506
1507sps_transfer_failed:
1508 msm_slim_sps_mem_free(dev, mem);
1509alloc_buffer_failed:
1510 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1511 sps_register_event(endpoint->sps, &sps_error_event);
1512sps_reg_event_failed:
1513 sps_disconnect(endpoint->sps);
1514sps_connect_failed:
1515 msm_slim_sps_mem_free(dev, descr);
1516alloc_descr_failed:
1517 msm_slim_free_endpoint(endpoint);
1518sps_init_endpoint_failed:
1519 return ret;
1520}
1521
1522/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
/*
 * Register the SlimBus BAM with the SPS driver (including per-EE security
 * pipe ownership), work out which pipe index belongs to the Apps EE, and
 * initialize the RX message-queue endpoint.
 * Returns 0 on success or a negative error code.
 */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/* static per-execution-environment pipe ownership for this BAM */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = { /* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = { /* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = { /* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/* NOTE(review): if no bit is set above bit 6, i ends up as 32 and
	 * pipe_b becomes 25 unchecked — presumably the board masks always
	 * contain an Apps pipe; confirm */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}
1589
1590static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1591{
1592 if (dev->use_rx_msgqs) {
1593 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1594 struct sps_connect *config = &endpoint->config;
1595 struct sps_mem_buffer *descr = &config->desc;
1596 struct sps_mem_buffer *mem = &endpoint->buf;
1597 struct sps_register_event sps_event;
1598 memset(&sps_event, 0x00, sizeof(sps_event));
1599 msm_slim_sps_mem_free(dev, mem);
1600 sps_register_event(endpoint->sps, &sps_event);
1601 sps_disconnect(endpoint->sps);
1602 msm_slim_sps_mem_free(dev, descr);
1603 msm_slim_free_endpoint(endpoint);
1604 }
1605 sps_deregister_bam_device(dev->bam.hdl);
1606}
1607
Sagar Dhariacc969452011-09-19 10:34:30 -06001608static void msm_slim_prg_slew(struct platform_device *pdev,
1609 struct msm_slim_ctrl *dev)
1610{
1611 struct resource *slew_io;
1612 void __iomem *slew_reg;
1613 /* SLEW RATE register for this slimbus */
1614 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1615 "slimbus_slew_reg");
1616 if (!dev->slew_mem) {
1617 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1618 return;
1619 }
1620 slew_io = request_mem_region(dev->slew_mem->start,
1621 resource_size(dev->slew_mem), pdev->name);
1622 if (!slew_io) {
1623 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1624 dev->slew_mem = NULL;
1625 return;
1626 }
1627
1628 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1629 if (!slew_reg) {
1630 dev_dbg(dev->dev, "slew register mapping failed");
1631 release_mem_region(dev->slew_mem->start,
1632 resource_size(dev->slew_mem));
1633 dev->slew_mem = NULL;
1634 return;
1635 }
1636 writel_relaxed(1, slew_reg);
1637 /* Make sure slimbus-slew rate enabling goes through */
1638 wmb();
1639 iounmap(slew_reg);
1640}
1641
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642static int __devinit msm_slim_probe(struct platform_device *pdev)
1643{
1644 struct msm_slim_ctrl *dev;
1645 int ret;
1646 struct resource *bam_mem, *bam_io;
1647 struct resource *slim_mem, *slim_io;
1648 struct resource *irq, *bam_irq;
1649 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1650 "slimbus_physical");
1651 if (!slim_mem) {
1652 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1653 return -ENODEV;
1654 }
1655 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1656 pdev->name);
1657 if (!slim_io) {
1658 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1659 return -EBUSY;
1660 }
1661
1662 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1663 "slimbus_bam_physical");
1664 if (!bam_mem) {
1665 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1666 ret = -ENODEV;
1667 goto err_get_res_bam_failed;
1668 }
1669 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1670 pdev->name);
1671 if (!bam_io) {
1672 release_mem_region(slim_mem->start, resource_size(slim_mem));
1673 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1674 ret = -EBUSY;
1675 goto err_get_res_bam_failed;
1676 }
1677 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1678 "slimbus_irq");
1679 if (!irq) {
1680 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1681 ret = -ENODEV;
1682 goto err_get_res_failed;
1683 }
1684 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1685 "slimbus_bam_irq");
1686 if (!bam_irq) {
1687 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1688 ret = -ENODEV;
1689 goto err_get_res_failed;
1690 }
1691
1692 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1693 if (!dev) {
1694 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1695 ret = -ENOMEM;
1696 goto err_get_res_failed;
1697 }
1698 dev->dev = &pdev->dev;
1699 platform_set_drvdata(pdev, dev);
1700 slim_set_ctrldata(&dev->ctrl, dev);
1701 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1702 if (!dev->base) {
1703 dev_err(&pdev->dev, "IOremap failed\n");
1704 ret = -ENOMEM;
1705 goto err_ioremap_failed;
1706 }
1707 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1708 if (!dev->bam.base) {
1709 dev_err(&pdev->dev, "BAM IOremap failed\n");
1710 ret = -ENOMEM;
1711 goto err_ioremap_bam_failed;
1712 }
1713 dev->ctrl.nr = pdev->id;
1714 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1715 dev->ctrl.nports = MSM_SLIM_NPORTS;
1716 dev->ctrl.set_laddr = msm_set_laddr;
1717 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001718 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001719 dev->ctrl.config_port = msm_config_port;
1720 dev->ctrl.port_xfer = msm_slim_port_xfer;
1721 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1722 /* Reserve some messaging BW for satellite-apps driver communication */
1723 dev->ctrl.sched.pending_msgsl = 30;
1724
1725 init_completion(&dev->reconf);
1726 mutex_init(&dev->tx_lock);
1727 spin_lock_init(&dev->rx_lock);
1728 dev->ee = 1;
1729 dev->use_rx_msgqs = 1;
1730 dev->irq = irq->start;
1731 dev->bam.irq = bam_irq->start;
1732
1733 ret = msm_slim_sps_init(dev, bam_mem);
1734 if (ret != 0) {
1735 dev_err(dev->dev, "error SPS init\n");
1736 goto err_sps_init_failed;
1737 }
1738
1739
1740 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
Sagar Dhariacc969452011-09-19 10:34:30 -06001741 if (!dev->rclk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001742 dev_err(dev->dev, "slimbus clock not found");
1743 goto err_clk_get_failed;
1744 }
1745 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1746 dev->framer.superfreq =
1747 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1748 dev->ctrl.a_framer = &dev->framer;
1749 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001750 dev->ctrl.dev.parent = &pdev->dev;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001751
1752 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1753 "msm_slim_irq", dev);
1754 if (ret) {
1755 dev_err(&pdev->dev, "request IRQ failed\n");
1756 goto err_request_irq_failed;
1757 }
1758
1759 dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1760 if (!dev->satd) {
1761 ret = -ENOMEM;
1762 goto err_sat_failed;
1763 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001764
1765 msm_slim_prg_slew(pdev, dev);
1766 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1767 clk_enable(dev->rclk);
1768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769 dev->satd->dev = dev;
1770 dev->satd->satcl.name = "msm_sat_dev";
1771 spin_lock_init(&dev->satd->lock);
1772 INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
1773 dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
1774 /* Component register initialization */
1775 writel_relaxed(1, dev->base + COMP_CFG);
1776 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1777 dev->base + COMP_TRUST_CFG);
1778
1779 /*
1780 * Manager register initialization
1781 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1782 */
1783 if (dev->use_rx_msgqs)
1784 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1785 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1786 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1787 else
1788 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1789 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1790 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1791 writel_relaxed(1, dev->base + MGR_CFG);
1792 /*
1793 * Framer registers are beyond 1K memory region after Manager and/or
1794 * component registers. Make sure those writes are ordered
1795 * before framer register writes
1796 */
1797 wmb();
1798
Sagar Dharia72007922011-12-13 21:14:26 -07001799 /* Register with framework before enabling frame, clock */
1800 ret = slim_add_numbered_controller(&dev->ctrl);
1801 if (ret) {
1802 dev_err(dev->dev, "error adding controller\n");
1803 goto err_ctrl_failed;
1804 }
1805
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001806 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001807 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1808 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1809 dev->base + FRM_CFG);
1810 /*
1811 * Make sure that framer wake-up and enabling writes go through
1812 * before any other component is enabled. Framer is responsible for
1813 * clocking the bus and enabling framer first will ensure that other
1814 * devices can report presence when they are enabled
1815 */
1816 mb();
1817
1818 /* Enable RX msg Q */
1819 if (dev->use_rx_msgqs)
1820 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1821 dev->base + MGR_CFG);
1822 else
1823 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1824 /*
1825 * Make sure that manager-enable is written through before interface
1826 * device is enabled
1827 */
1828 mb();
1829 writel_relaxed(1, dev->base + INTF_CFG);
1830 /*
1831 * Make sure that interface-enable is written through before enabling
1832 * ported generic device inside MSM manager
1833 */
1834 mb();
1835 writel_relaxed(1, dev->base + PGD_CFG);
1836 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1837 /*
1838 * Make sure that ported generic device is enabled and port-EE settings
1839 * are written through before finally enabling the component
1840 */
1841 mb();
1842
1843 writel_relaxed(1, dev->base + COMP_CFG);
1844 /*
1845 * Make sure that all writes have gone through before exiting this
1846 * function
1847 */
1848 mb();
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001849 pm_runtime_use_autosuspend(&pdev->dev);
1850 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
1851 pm_runtime_set_active(&pdev->dev);
1852
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001853 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1854 return 0;
1855
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001856err_ctrl_failed:
1857 writel_relaxed(0, dev->base + COMP_CFG);
1858 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001859err_sat_failed:
1860 free_irq(dev->irq, dev);
1861err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001862 clk_disable(dev->rclk);
1863 clk_put(dev->rclk);
1864err_clk_get_failed:
1865 msm_slim_sps_exit(dev);
1866err_sps_init_failed:
1867 iounmap(dev->bam.base);
1868err_ioremap_bam_failed:
1869 iounmap(dev->base);
1870err_ioremap_failed:
1871 kfree(dev);
1872err_get_res_failed:
1873 release_mem_region(bam_mem->start, resource_size(bam_mem));
1874err_get_res_bam_failed:
1875 release_mem_region(slim_mem->start, resource_size(slim_mem));
1876 return ret;
1877}
1878
/*
 * Remove the controller: unwind the satellite device, runtime-PM state,
 * IRQ, slimbus registration, clock reference, SPS resources, RX thread and
 * mappings, then release the memory regions claimed in probe.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* saved before dev is freed below */
	struct resource *slew_mem = dev->slew_mem;
	struct msm_slim_sat *sat = dev->satd;
	slim_remove_device(&sat->satcl);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	/* NOTE(review): no clk_disable() here although probe enabled the
	 * clock — presumably runtime suspend has already disabled it;
	 * confirm the enable/disable balance */
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	/* NOTE(review): the RX thread is stopped after its message-queue
	 * memory is freed by msm_slim_sps_exit(); confirm the thread cannot
	 * touch those buffers at this point */
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1912
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue an autosuspend request (which honors the
 * autosuspend delay set in probe) and return -EAGAIN so the PM core does
 * not proceed with a synchronous suspend from this callback.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
#endif
1921
1922/*
1923 * If PM_RUNTIME is not defined, these 2 functions become helper
1924 * functions to be called from system suspend/resume. So they are not
1925 * inside ifdef CONFIG_PM_RUNTIME
1926 */
Sagar Dharia45e77912012-01-10 09:55:18 -07001927#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001928static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001929{
1930 struct platform_device *pdev = to_platform_device(device);
1931 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001932 int ret;
1933 dev_dbg(device, "pm_runtime: suspending...\n");
1934 dev->state = MSM_CTRL_SLEEPING;
1935 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001936 if (ret) {
1937 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001938 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001939 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001940 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001941 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001942 return ret;
1943}
1944
1945static int msm_slim_runtime_resume(struct device *device)
1946{
1947 struct platform_device *pdev = to_platform_device(device);
1948 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1949 int ret = 0;
1950 dev_dbg(device, "pm_runtime: resuming...\n");
1951 if (dev->state == MSM_CTRL_ASLEEP)
1952 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001953 if (ret) {
1954 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001955 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001956 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001957 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001958 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001959 return ret;
1960}
1961
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001962static int msm_slim_suspend(struct device *dev)
1963{
1964 int ret = 0;
1965 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
1966 dev_dbg(dev, "system suspend");
1967 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06001968 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001969 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06001970 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001971 * If the clock pause failed due to active channels, there is
1972 * a possibility that some audio stream is active during suspend
1973 * We dont want to return suspend failure in that case so that
1974 * display and relevant components can still go to suspend.
1975 * If there is some other error, then it should be passed-on
1976 * to system level suspend
1977 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06001978 ret = 0;
1979 }
1980 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001981}
1982
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001983static int msm_slim_resume(struct device *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984{
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001985 /* If runtime_pm is enabled, this resume shouldn't do anything */
1986 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
1987 int ret;
1988 dev_dbg(dev, "system resume");
1989 ret = msm_slim_runtime_resume(dev);
1990 if (!ret) {
1991 pm_runtime_mark_last_busy(dev);
1992 pm_request_autosuspend(dev);
1993 }
1994 return ret;
1995
Sagar Dharia144e5e02011-08-08 17:30:11 -06001996 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 return 0;
1998}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001999#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000
/*
 * Power-management operations: system sleep hooks plus runtime-PM hooks.
 *
 * NOTE(review): msm_slim_runtime_suspend/_resume are defined under
 * CONFIG_PM_SLEEP, but SET_RUNTIME_PM_OPS references them whenever
 * CONFIG_PM_RUNTIME is set — a CONFIG_PM_RUNTIME && !CONFIG_PM_SLEEP
 * configuration would fail to build.  Confirm the Kconfig constraints,
 * or guard the helpers with CONFIG_PM instead.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2012
/*
 * Platform driver glue.  probe/remove are defined earlier in this file;
 * the PM ops above supply both system-sleep and runtime-PM behavior.
 */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
2022
2023static int msm_slim_init(void)
2024{
2025 return platform_driver_register(&msm_slim_driver);
2026}
2027subsys_initcall(msm_slim_init);
2028
/* Module exit point: unregister the platform driver. */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");