blob: aa67c8c897e67a30a36f59d7503bccc6126eacdf [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <mach/sps.h>
26
/* Per spec. max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

/*
 * User-defined (MT_USER) message codes exchanged between this master
 * and the satellite controller.
 */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
/* Runtime-PM autosuspend delay (ms) */
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM		32

/*
 * Assemble the first word of a SLIMbus message from its remaining length,
 * message type, message code, destination type and address.
 */
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
#define SLIM_ROOT_FREQ	24576000

/* Depth of the controller-side and satellite-side RX rings */
#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
/* Default port configuration bits written to PGD_PORT_CFGn */
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

/* Satellite protocol magic/version bytes */
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCSS	0x20

/* Qualcomm manufacturer/device identifiers in enumeration addresses */
#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
#define QC_MSM_DEVS	5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086
/* Component registers (offsets from controller register base) */
enum comp_reg {
	COMP_CFG	= 0,
	COMP_TRUST_CFG	= 0x14,
};

/* Manager registers */
enum mgr_reg {
	MGR_CFG		= 0x200,
	MGR_STATUS	= 0x204,
	MGR_RX_MSGQ_CFG	= 0x208,
	MGR_INT_EN	= 0x210,
	MGR_INT_STAT	= 0x214,
	MGR_INT_CLR	= 0x218,
	MGR_TX_MSG	= 0x230,	/* TX message FIFO window */
	MGR_RX_MSG	= 0x270,	/* RX message FIFO window */
	MGR_VE_STAT	= 0x300,
};

/* Bits written to MGR_CFG */
enum msg_cfg {
	MGR_CFG_ENABLE		= 1,
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,
	MSGQ_TX_LOW	= 1,
	MSGQ_TX_HIGH	= 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG		= 0x400,
	FRM_STAT	= 0x404,
	FRM_INT_EN	= 0x410,
	FRM_INT_STAT	= 0x414,
	FRM_INT_CLR	= 0x418,
	FRM_WAKEUP	= 0x41C,	/* written to wake framer from clock pause */
	FRM_CLKCTL_DONE	= 0x420,
	FRM_IE_STAT	= 0x430,
	FRM_VE_STAT	= 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG	= 0x600,
	INTF_STAT	= 0x604,
	INTF_INT_EN	= 0x610,
	INTF_INT_STAT	= 0x614,
	INTF_INT_CLR	= 0x618,
	INTF_IE_STAT	= 0x630,
	INTF_VE_STAT	= 0x640,
};

/*
 * Manager PGD (ported generic device) registers.
 * The "EEn" registers are banked per execution environment (16-byte stride);
 * the "n" port registers are banked per port (32-byte stride).
 */
enum pgd_reg {
	PGD_CFG			= 0x1000,
	PGD_STAT		= 0x1004,
	PGD_INT_EN		= 0x1010,
	PGD_INT_STAT		= 0x1014,
	PGD_INT_CLR		= 0x1018,
	PGD_OWN_EEn		= 0x1020,
	PGD_PORT_INT_EN_EEn	= 0x1030,
	PGD_PORT_INT_ST_EEn	= 0x1034,
	PGD_PORT_INT_CL_EEn	= 0x1038,
	PGD_PORT_CFGn		= 0x1080,
	PGD_PORT_STATn		= 0x1084,
	PGD_PORT_PARAMn		= 0x1088,
	PGD_PORT_BLKn		= 0x108C,
	PGD_PORT_TRANn		= 0x1090,
	PGD_PORT_MCHANn		= 0x1094,
	PGD_PORT_PSHPLLn	= 0x1098,
	PGD_PORT_PC_CFGn	= 0x1600,
	PGD_PORT_PC_VALn	= 0x1604,
	PGD_PORT_PC_VFR_TSn	= 0x1608,
	PGD_PORT_PC_VFR_STn	= 0x160C,
	PGD_PORT_PC_VFR_CLn	= 0x1610,
	PGD_IE_STAT		= 0x1700,
	PGD_VE_STAT		= 0x1710,
};
168
/* Resource-group ownership encoding (used with PGD_OWN_EEn) */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};

/* Manager interrupt status/clear bits (MGR_INT_STAT / MGR_INT_CLR) */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};

/* FRM_CFG field bit positions */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};

/*
 * Controller power state:
 * AWAKE    - fully operational
 * SLEEPING - suspend in progress (system suspend path)
 * ASLEEP   - suspended / clock paused
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
195
/* SPS/BAM hardware instance backing the SLIMbus data pipes */
struct msm_slim_sps_bam {
	u32			hdl;	/* handle returned by SPS for this BAM */
	void __iomem		*base;	/* BAM register space */
	int			irq;
};

/* One SPS endpoint (pipe) and its connection/event/buffer state */
struct msm_slim_endp {
	struct sps_pipe			*sps;
	struct sps_connect		config;
	struct sps_register_event	event;
	struct sps_mem_buffer		buf;
	struct completion		*xcomp;		/* signalled on transfer done */
	bool				connected;	/* sps_connect() succeeded */
};
210
/* Per-controller driver state for the MSM SLIMbus manager */
struct msm_slim_ctrl {
	struct slim_controller	ctrl;		/* core SLIMbus controller */
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;		/* manager register space */
	struct resource		*slew_mem;	/* slew-rate control region — presumably; confirm vs probe */
	u32			curr_bw;
	u8			msg_cnt;
	u32			tx_buf[10];	/* single TX message assembly buffer (see msm_get_msg_buf) */
	/* RX ring of raw received messages; head/tail guarded by rx_lock */
	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t		rx_lock;
	int			head;		/* ring consumer index */
	int			tail;		/* ring producer index */
	int			irq;
	int			err;		/* last TX error, set by ISR on NACK */
	int			ee;		/* execution environment for banked registers */
	struct completion	*wr_comp;	/* completed by ISR on TX sent/NACKed */
	struct msm_slim_sat	*satd;		/* satellite context */
	struct msm_slim_endp	pipes[7];	/* data-port endpoints */
	struct msm_slim_sps_bam	bam;
	struct msm_slim_endp	rx_msgq;	/* BAM-backed RX message queue endpoint */
	struct completion	rx_msgq_notify;
	struct task_struct	*rx_msgq_thread;
	struct clk		*rclk;		/* root clock, gated during clock pause */
	struct mutex		tx_lock;	/* serializes message transmission */
	u8			pgdla;		/* logical address of the ported generic device */
	bool			use_rx_msgqs;	/* use BAM msg queue instead of register FIFO — confirm */
	int			pipe_b;		/* offset of first data port: hw port = pipe index + pipe_b */
	struct completion	reconf;		/* completed by ISR on RECONFIG_DONE */
	bool			reconf_busy;
	bool			chan_active;	/* runtime-PM vote held for data channels */
	enum msm_ctrl_state	state;
};
244
/* Satellite-controller context (messages handled on a dedicated workqueue) */
struct msm_slim_sat {
	struct slim_device	satcl;
	struct msm_slim_ctrl	*dev;		/* owning controller */
	struct workqueue_struct	*wq;
	struct work_struct	wd;		/* processes queued satellite messages */
	/* Ring of raw satellite messages; shead/stail guarded by lock */
	u8			sat_msgs[SAT_CONCUR_MSG][40];
	u16			*satch;		/* satellite index -> channel handle map */
	u8			nsatch;
	bool			sent_capability;
	bool			pending_reconf;
	bool			pending_capability;
	int			shead;		/* ring consumer index */
	int			stail;		/* ring producer index */
	spinlock_t		lock;
};
260
261static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
262{
263 spin_lock(&dev->rx_lock);
264 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
265 spin_unlock(&dev->rx_lock);
266 dev_err(dev->dev, "RX QUEUE full!");
267 return -EXFULL;
268 }
269 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
270 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
271 spin_unlock(&dev->rx_lock);
272 return 0;
273}
274
275static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
276{
277 unsigned long flags;
278 spin_lock_irqsave(&dev->rx_lock, flags);
279 if (dev->tail == dev->head) {
280 spin_unlock_irqrestore(&dev->rx_lock, flags);
281 return -ENODATA;
282 }
283 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
284 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
285 spin_unlock_irqrestore(&dev->rx_lock, flags);
286 return 0;
287}
288
289static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
290{
291 struct msm_slim_ctrl *dev = sat->dev;
292 spin_lock(&sat->lock);
293 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
294 spin_unlock(&sat->lock);
295 dev_err(dev->dev, "SAT QUEUE full!");
296 return -EXFULL;
297 }
298 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
299 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
300 spin_unlock(&sat->lock);
301 return 0;
302}
303
304static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
305{
306 unsigned long flags;
307 spin_lock_irqsave(&sat->lock, flags);
308 if (sat->stail == sat->shead) {
309 spin_unlock_irqrestore(&sat->lock, flags);
310 return -ENODATA;
311 }
312 memcpy(buf, sat->sat_msgs[sat->shead], 40);
313 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
314 spin_unlock_irqrestore(&sat->lock, flags);
315 return 0;
316}
317
318static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
319{
320 e_addr[0] = (buffer[1] >> 24) & 0xff;
321 e_addr[1] = (buffer[1] >> 16) & 0xff;
322 e_addr[2] = (buffer[1] >> 8) & 0xff;
323 e_addr[3] = buffer[1] & 0xff;
324 e_addr[4] = (buffer[0] >> 24) & 0xff;
325 e_addr[5] = (buffer[0] >> 16) & 0xff;
326}
327
328static bool msm_is_sat_dev(u8 *e_addr)
329{
330 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
331 e_addr[2] != QC_CHIPID_SL &&
332 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
333 return true;
334 return false;
335}
336
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700337static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600338{
Sagar Dharia45e77912012-01-10 09:55:18 -0700339#ifdef CONFIG_PM_RUNTIME
340 int ref = 0;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700341 int ret = pm_runtime_get_sync(dev->dev);
342 if (ret >= 0) {
343 ref = atomic_read(&dev->dev->power.usage_count);
344 if (ref <= 0) {
345 dev_err(dev->dev, "reference count -ve:%d", ref);
346 ret = -ENODEV;
347 }
348 }
349 return ret;
Sagar Dharia45e77912012-01-10 09:55:18 -0700350#else
351 return -ENODEV;
352#endif
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600353}
/* Drop a runtime-PM vote, refreshing the autosuspend timestamp first. */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put(dev->dev);
#endif
}
361
/*
 * Top-level ISR for the SLIMbus manager.
 * Handles, in order: TX done/NACK, RX message dispatch (to the satellite
 * workqueue, the RX ring, or error logging), reconfiguration-done, and
 * per-port (overflow/underflow/disconnect) status interrupts.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: record -EIO for the waiter in msm_xfer_msg */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word: bits 0-4 length, 5-7 message type, 8-15 code */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-space messages are handled by the satellite */
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Replies are consumed by the rxwq thread */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log the reported information element and bitmasks */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Port interrupts for this execution environment */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
					(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
527
528static int
529msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
530{
531 int ret;
532 struct sps_pipe *endpoint;
533 struct sps_connect *config = &ep->config;
534
535 /* Allocate the endpoint */
536 endpoint = sps_alloc_endpoint();
537 if (!endpoint) {
538 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
539 return -ENOMEM;
540 }
541
542 /* Get default connection configuration for an endpoint */
543 ret = sps_get_config(endpoint, config);
544 if (ret) {
545 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
546 goto sps_config_failed;
547 }
548
549 ep->sps = endpoint;
550 return 0;
551
552sps_config_failed:
553 sps_free_endpoint(endpoint);
554 return ret;
555}
556
557static void
558msm_slim_free_endpoint(struct msm_slim_endp *ep)
559{
560 sps_free_endpoint(ep->sps);
561 ep->sps = NULL;
562}
563
564static int msm_slim_sps_mem_alloc(
565 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
566{
567 dma_addr_t phys;
568
569 mem->size = len;
570 mem->min_size = 0;
571 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
572
573 if (!mem->base) {
574 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
575 return -ENOMEM;
576 }
577
578 mem->phys_base = phys;
579 memset(mem->base, 0x00, mem->size);
580 return 0;
581}
582
583static void
584msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
585{
586 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
587 mem->size = 0;
588 mem->base = NULL;
589 mem->phys_base = 0;
590}
591
592static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
593{
594 u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
595 u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
596 (dev->ee * 16));
597 writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
598 writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
599 writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
600 writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
601 (dev->ee * 16));
602 /* Make sure that port registers are updated before returning */
603 mb();
604}
605
/*
 * Connect the SPS pipe backing SLIMbus port @pn to the BAM, in the
 * direction implied by the port's flow (SLIM_SRC feeds the bus, otherwise
 * data is read from it). The BAM pipe index is read from the port's
 * hardware status register. On success the pipe is marked connected and
 * the hardware port is configured/enabled.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* If already connected, just refresh the options set above */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* Hardware port number = pn + pipe_b; pipe index is in stat[11:4] */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* Port sources data onto the bus: memory -> BAM */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* Port sinks data from the bus: BAM -> memory */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
657
658static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
659{
660 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
661 /*
662 * Currently we block a transaction until the current one completes.
663 * In case we need multiple transactions, use message Q
664 */
665 return dev->tx_buf;
666}
667
668static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
669{
670 int i;
671 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
672 for (i = 0; i < (len + 3) >> 2; i++) {
673 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
674 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
675 }
676 /* Guarantee that message is sent before returning */
677 mb();
678 return 0;
679}
680
/*
 * Transmit one SLIMbus transaction and wait for hardware TX completion.
 * Serialized by tx_lock. Takes a runtime-PM "messaging" vote for all
 * non-clock-pause transactions, and a separate "data channel" vote across
 * BEGIN_RECONFIGURATION..RECONFIGURE_NOW while channels are scheduled.
 * Returns dev->err from the ISR (0 or -EIO on NACK), -EBUSY when
 * suspended, -EPROTONOSUPPORT for enumeration-addressed destinations,
 * or -ETIMEDOUT.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 iff the messaging PM vote was taken */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Only clock-pause-sequence messages may go out while SLEEPING */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	/* Enumeration-addressed destinations are not supported */
	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* la == 0xFF on port connect/disconnect means the local PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	/* Assemble the header word; dt flag 0 = logical, 1 = broadcast */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Element-access messages carry a 16-bit element code (LE) */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/* Tear down the BAM pipe; no bus message needed */
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate the port number to the hardware port space */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/*
		 * Clock-pause RECONFIGURE_NOW: wait for reconfiguration to
		 * finish, then gate the root clock and IRQ.
		 */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		/* Drop the data-channel vote once no slots remain in use */
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
837
838static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
839 u8 elen, u8 laddr)
840{
841 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
842 DECLARE_COMPLETION_ONSTACK(done);
843 int timeout;
844 u32 *buf;
845 mutex_lock(&dev->tx_lock);
846 buf = msm_get_msg_buf(ctrl, 9);
847 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
848 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
849 SLIM_MSG_DEST_LOGICALADDR,
850 ea[5] | ea[4] << 8);
851 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
852 buf[2] = laddr;
853
854 dev->wr_comp = &done;
855 msm_send_msg_buf(ctrl, buf, 9);
856 timeout = wait_for_completion_timeout(&done, HZ);
857 mutex_unlock(&dev->tx_lock);
858 return timeout ? dev->err : -ETIMEDOUT;
859}
860
/*
 * Wake the bus from SLIMbus clock pause: re-enable the IRQ and root clock
 * (both disabled when clock pause completed in msm_xfer_msg), then kick
 * the framer via FRM_WAKEUP.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700882static int msm_config_port(struct slim_controller *ctrl, u8 pn)
883{
884 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
885 struct msm_slim_endp *endpoint;
886 int ret = 0;
887 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
888 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
889 return -EPROTONOSUPPORT;
890 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
891 return -ENODEV;
892
893 endpoint = &dev->pipes[pn];
894 ret = msm_slim_init_endpoint(dev, endpoint);
895 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
896 return ret;
897}
898
899static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
900 u8 pn, u8 **done_buf, u32 *done_len)
901{
902 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
903 struct sps_iovec sio;
904 int ret;
905 if (done_len)
906 *done_len = 0;
907 if (done_buf)
908 *done_buf = NULL;
909 if (!dev->pipes[pn].connected)
910 return SLIM_P_DISCONNECT;
911 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
912 if (!ret) {
913 if (done_len)
914 *done_len = sio.size;
915 if (done_buf)
916 *done_buf = (u8 *)sio.addr;
917 }
918 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
919 return SLIM_P_INPROGRESS;
920}
921
922static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
923 u32 len, struct completion *comp)
924{
925 struct sps_register_event sreg;
926 int ret;
927 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600928 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700929 return -ENODEV;
930
931
932 ctrl->ports[pn].xcomp = comp;
933 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
934 sreg.mode = SPS_TRIGGER_WAIT;
935 sreg.xfer_done = comp;
936 sreg.callback = NULL;
937 sreg.user = &ctrl->ports[pn];
938 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
939 if (ret) {
940 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
941 return ret;
942 }
943 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
944 SPS_IOVEC_FLAG_INT);
945 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
946
947 return ret;
948}
949
/*
 * Handle a satellite channel message: either a CHAN_CTRL operation on one
 * channel, or a DEFINE_CHAN/DEF_ACT_CHAN request that parses channel
 * properties from the message payload and defines (and optionally
 * activates) the listed channels. Channel indices in @buf are satellite-
 * local and mapped through sat->satch to core channel handles.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* buf[5]: satellite channel index; buf[3][7:6]: operation */
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* Channel index list starts at buf[8] */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields even values
		 * only (0/2/4/6); a 2-bit field would normally use >> 6 —
		 * confirm against the satellite message format.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		/* Rate multiplier = (coeff ? 3 : 1) * 2^exp */
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i == len here, so i > 9 means more than one channel listed */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
999
/*
 * Process one received message on the manager (non-satellite) Rx path.
 * Dispatches device-presence reports, value/information replies, and
 * information reports; anything else is logged as unexpected.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Header byte 0: 5-bit length, 3-bit message type; byte 1: code */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/*
			 * Turn on runtime-PM only after the last expected
			 * MSM-internal device (QC_MSM_DEVS - 1) enumerates
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* route the payload (after the 4-byte header) to the
			 * transaction waiting on this tid */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element and bitmasks */
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1053
/*
 * Workqueue handler for satellite-bound messages.
 *
 * Drains the satellite Rx queue, and for each message: optionally takes a
 * runtime-PM vote (tracked via satv / pending_capability / pending_reconf),
 * performs the requested operation, and sends a GENERIC_ACK back when the
 * message type requires one (gen_ack). Votes taken here are dropped either
 * immediately after the ack, or deferred until a matching event (capability
 * ack, RECONFIG_NOW) arrives.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address is stored in reverse order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			/*
			 * Vote to keep the controller active until the
			 * satellite acknowledges the capability message
			 */
			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* every other user message gets a per-message vote */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* look up the logical address for the queried e-addr */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between define and control */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/*
			 * Hold a vote across the whole reconfiguration
			 * sequence; dropped on RECONFIG_NOW below
			 */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
				((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* falls through to default, which only breaks */
		default:
			break;
		}
		/* no ack needed: just drop this message's vote, if taken */
		if (!gen_ack) {
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* generic ack: tid + status back to the satellite */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1240
1241static void
1242msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1243{
1244 u32 *buf = ev->data.transfer.user;
1245 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1246
1247 /*
1248 * Note the virtual address needs to be offset by the same index
1249 * as the physical address or just pass in the actual virtual address
1250 * if the sps_mem_buffer is not needed. Note that if completion is
1251 * used, the virtual address won't be available and will need to be
1252 * calculated based on the offset of the physical address
1253 */
1254 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1255
1256 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1257
1258 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1259 iovec->addr, iovec->size, iovec->flags);
1260
1261 } else {
1262 dev_err(dev->dev, "%s: unknown event %d\n",
1263 __func__, ev->event_id);
1264 }
1265}
1266
1267static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1268{
1269 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1270 msm_slim_rx_msgq_event(dev, notify);
1271}
1272
1273/* Queue up Rx message buffer */
1274static inline int
1275msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1276{
1277 int ret;
1278 u32 flags = SPS_IOVEC_FLAG_INT;
1279 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1280 struct sps_mem_buffer *mem = &endpoint->buf;
1281 struct sps_pipe *pipe = endpoint->sps;
1282
1283 /* Rx message queue buffers are 4 bytes in length */
1284 u8 *virt_addr = mem->base + (4 * ix);
1285 u32 phys_addr = mem->phys_base + (4 * ix);
1286
1287 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1288
1289 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1290 if (ret)
1291 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1292
1293 return ret;
1294}
1295
1296static inline int
1297msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1298{
1299 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1300 struct sps_mem_buffer *mem = &endpoint->buf;
1301 struct sps_pipe *pipe = endpoint->sps;
1302 struct sps_iovec iovec;
1303 int index;
1304 int ret;
1305
1306 ret = sps_get_iovec(pipe, &iovec);
1307 if (ret) {
1308 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1309 goto err_exit;
1310 }
1311
1312 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1313 iovec.addr, iovec.size, iovec.flags);
1314 BUG_ON(iovec.addr < mem->phys_base);
1315 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1316
1317 /* Calculate buffer index */
1318 index = (iovec.addr - mem->phys_base) / 4;
1319 *(data + offset) = *((u32 *)mem->base + index);
1320
1321 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1322
1323 /* Add buffer back to the queue */
1324 (void)msm_slim_post_rx_msgq(dev, index);
1325
1326err_exit:
1327 return ret;
1328}
1329
/*
 * Rx kthread: woken (via dev->rx_msgq_notify) once per received word when
 * message queues are used, or once per message otherwise. Assembles 4-byte
 * words into a message buffer, then dispatches the complete message either
 * to the satellite workqueue or to the generic manager path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;		/* next word slot in buffer[] */
	u8 msg_len = 0;		/* total length of message being assembled */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* first word carries len (5 bits), mt, and mc;
			 * NOTE(review): a message fitting entirely in this
			 * first word would not be dispatched until the next
			 * word arrives -- confirm minimum message length */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			/* message complete: route it and reset assembly */
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1398
1399static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1400{
1401 int i, ret;
1402 u32 pipe_offset;
1403 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1404 struct sps_connect *config = &endpoint->config;
1405 struct sps_mem_buffer *descr = &config->desc;
1406 struct sps_mem_buffer *mem = &endpoint->buf;
1407 struct completion *notify = &dev->rx_msgq_notify;
1408
1409 struct sps_register_event sps_error_event; /* SPS_ERROR */
1410 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1411
1412 /* Allocate the endpoint */
1413 ret = msm_slim_init_endpoint(dev, endpoint);
1414 if (ret) {
1415 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1416 goto sps_init_endpoint_failed;
1417 }
1418
1419 /* Get the pipe indices for the message queues */
1420 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1421 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1422
1423 config->mode = SPS_MODE_SRC;
1424 config->source = dev->bam.hdl;
1425 config->destination = SPS_DEV_HANDLE_MEM;
1426 config->src_pipe_index = pipe_offset;
1427 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1428 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1429
1430 /* Allocate memory for the FIFO descriptors */
1431 ret = msm_slim_sps_mem_alloc(dev, descr,
1432 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1433 if (ret) {
1434 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1435 goto alloc_descr_failed;
1436 }
1437
1438 ret = sps_connect(endpoint->sps, config);
1439 if (ret) {
1440 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1441 goto sps_connect_failed;
1442 }
1443
1444 /* Register completion for DESC_DONE */
1445 init_completion(notify);
1446 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1447
1448 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1449 sps_descr_event.options = SPS_O_DESC_DONE;
1450 sps_descr_event.user = (void *)dev;
1451 sps_descr_event.xfer_done = notify;
1452
1453 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1454 if (ret) {
1455 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1456 goto sps_reg_event_failed;
1457 }
1458
1459 /* Register callback for errors */
1460 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1461 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1462 sps_error_event.options = SPS_O_ERROR;
1463 sps_error_event.user = (void *)dev;
1464 sps_error_event.callback = msm_slim_rx_msgq_cb;
1465
1466 ret = sps_register_event(endpoint->sps, &sps_error_event);
1467 if (ret) {
1468 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1469 goto sps_reg_event_failed;
1470 }
1471
1472 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1473 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1474 if (ret) {
1475 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1476 goto alloc_buffer_failed;
1477 }
1478
1479 /*
1480 * Call transfer_one for each 4-byte buffer
1481 * Use (buf->size/4) - 1 for the number of buffer to post
1482 */
1483
1484 /* Setup the transfer */
1485 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1486 ret = msm_slim_post_rx_msgq(dev, i);
1487 if (ret) {
1488 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1489 goto sps_transfer_failed;
1490 }
1491 }
1492
1493 /* Fire up the Rx message queue thread */
1494 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1495 MSM_SLIM_NAME "_rx_msgq_thread");
1496 if (!dev->rx_msgq_thread) {
1497 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1498 ret = -EIO;
1499 } else
1500 return 0;
1501
1502sps_transfer_failed:
1503 msm_slim_sps_mem_free(dev, mem);
1504alloc_buffer_failed:
1505 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1506 sps_register_event(endpoint->sps, &sps_error_event);
1507sps_reg_event_failed:
1508 sps_disconnect(endpoint->sps);
1509sps_connect_failed:
1510 msm_slim_sps_mem_free(dev, descr);
1511alloc_descr_failed:
1512 msm_slim_free_endpoint(endpoint);
1513sps_init_endpoint_failed:
1514 return ret;
1515}
1516
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/* Per-execution-environment pipe ownership and VMIDs for the BAM */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/* pipe_b: first data pipe owned by this EE, relative to pipe 7 */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	/* Bring up the Rx message queue on top of the registered BAM */
	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}
1584
1585static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1586{
1587 if (dev->use_rx_msgqs) {
1588 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1589 struct sps_connect *config = &endpoint->config;
1590 struct sps_mem_buffer *descr = &config->desc;
1591 struct sps_mem_buffer *mem = &endpoint->buf;
1592 struct sps_register_event sps_event;
1593 memset(&sps_event, 0x00, sizeof(sps_event));
1594 msm_slim_sps_mem_free(dev, mem);
1595 sps_register_event(endpoint->sps, &sps_event);
1596 sps_disconnect(endpoint->sps);
1597 msm_slim_sps_mem_free(dev, descr);
1598 msm_slim_free_endpoint(endpoint);
1599 }
1600 sps_deregister_bam_device(dev->bam.hdl);
1601}
1602
Sagar Dhariacc969452011-09-19 10:34:30 -06001603static void msm_slim_prg_slew(struct platform_device *pdev,
1604 struct msm_slim_ctrl *dev)
1605{
1606 struct resource *slew_io;
1607 void __iomem *slew_reg;
1608 /* SLEW RATE register for this slimbus */
1609 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1610 "slimbus_slew_reg");
1611 if (!dev->slew_mem) {
1612 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1613 return;
1614 }
1615 slew_io = request_mem_region(dev->slew_mem->start,
1616 resource_size(dev->slew_mem), pdev->name);
1617 if (!slew_io) {
1618 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1619 dev->slew_mem = NULL;
1620 return;
1621 }
1622
1623 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1624 if (!slew_reg) {
1625 dev_dbg(dev->dev, "slew register mapping failed");
1626 release_mem_region(dev->slew_mem->start,
1627 resource_size(dev->slew_mem));
1628 dev->slew_mem = NULL;
1629 return;
1630 }
1631 writel_relaxed(1, slew_reg);
1632 /* Make sure slimbus-slew rate enabling goes through */
1633 wmb();
1634 iounmap(slew_reg);
1635}
1636
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637static int __devinit msm_slim_probe(struct platform_device *pdev)
1638{
1639 struct msm_slim_ctrl *dev;
1640 int ret;
1641 struct resource *bam_mem, *bam_io;
1642 struct resource *slim_mem, *slim_io;
1643 struct resource *irq, *bam_irq;
1644 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1645 "slimbus_physical");
1646 if (!slim_mem) {
1647 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1648 return -ENODEV;
1649 }
1650 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1651 pdev->name);
1652 if (!slim_io) {
1653 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1654 return -EBUSY;
1655 }
1656
1657 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1658 "slimbus_bam_physical");
1659 if (!bam_mem) {
1660 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1661 ret = -ENODEV;
1662 goto err_get_res_bam_failed;
1663 }
1664 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1665 pdev->name);
1666 if (!bam_io) {
1667 release_mem_region(slim_mem->start, resource_size(slim_mem));
1668 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1669 ret = -EBUSY;
1670 goto err_get_res_bam_failed;
1671 }
1672 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1673 "slimbus_irq");
1674 if (!irq) {
1675 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1676 ret = -ENODEV;
1677 goto err_get_res_failed;
1678 }
1679 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1680 "slimbus_bam_irq");
1681 if (!bam_irq) {
1682 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1683 ret = -ENODEV;
1684 goto err_get_res_failed;
1685 }
1686
1687 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1688 if (!dev) {
1689 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1690 ret = -ENOMEM;
1691 goto err_get_res_failed;
1692 }
1693 dev->dev = &pdev->dev;
1694 platform_set_drvdata(pdev, dev);
1695 slim_set_ctrldata(&dev->ctrl, dev);
1696 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1697 if (!dev->base) {
1698 dev_err(&pdev->dev, "IOremap failed\n");
1699 ret = -ENOMEM;
1700 goto err_ioremap_failed;
1701 }
1702 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1703 if (!dev->bam.base) {
1704 dev_err(&pdev->dev, "BAM IOremap failed\n");
1705 ret = -ENOMEM;
1706 goto err_ioremap_bam_failed;
1707 }
1708 dev->ctrl.nr = pdev->id;
1709 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1710 dev->ctrl.nports = MSM_SLIM_NPORTS;
1711 dev->ctrl.set_laddr = msm_set_laddr;
1712 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001713 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001714 dev->ctrl.config_port = msm_config_port;
1715 dev->ctrl.port_xfer = msm_slim_port_xfer;
1716 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1717 /* Reserve some messaging BW for satellite-apps driver communication */
1718 dev->ctrl.sched.pending_msgsl = 30;
1719
1720 init_completion(&dev->reconf);
1721 mutex_init(&dev->tx_lock);
1722 spin_lock_init(&dev->rx_lock);
1723 dev->ee = 1;
1724 dev->use_rx_msgqs = 1;
1725 dev->irq = irq->start;
1726 dev->bam.irq = bam_irq->start;
1727
1728 ret = msm_slim_sps_init(dev, bam_mem);
1729 if (ret != 0) {
1730 dev_err(dev->dev, "error SPS init\n");
1731 goto err_sps_init_failed;
1732 }
1733
1734
1735 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
Sagar Dhariacc969452011-09-19 10:34:30 -06001736 if (!dev->rclk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001737 dev_err(dev->dev, "slimbus clock not found");
1738 goto err_clk_get_failed;
1739 }
1740 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1741 dev->framer.superfreq =
1742 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1743 dev->ctrl.a_framer = &dev->framer;
1744 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001745 dev->ctrl.dev.parent = &pdev->dev;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001746
1747 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1748 "msm_slim_irq", dev);
1749 if (ret) {
1750 dev_err(&pdev->dev, "request IRQ failed\n");
1751 goto err_request_irq_failed;
1752 }
1753
1754 dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1755 if (!dev->satd) {
1756 ret = -ENOMEM;
1757 goto err_sat_failed;
1758 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001759
1760 msm_slim_prg_slew(pdev, dev);
1761 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1762 clk_enable(dev->rclk);
1763
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764 dev->satd->dev = dev;
1765 dev->satd->satcl.name = "msm_sat_dev";
1766 spin_lock_init(&dev->satd->lock);
1767 INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
1768 dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
1769 /* Component register initialization */
1770 writel_relaxed(1, dev->base + COMP_CFG);
1771 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1772 dev->base + COMP_TRUST_CFG);
1773
1774 /*
1775 * Manager register initialization
1776 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1777 */
1778 if (dev->use_rx_msgqs)
1779 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1780 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1781 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1782 else
1783 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1784 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1785 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1786 writel_relaxed(1, dev->base + MGR_CFG);
1787 /*
1788 * Framer registers are beyond 1K memory region after Manager and/or
1789 * component registers. Make sure those writes are ordered
1790 * before framer register writes
1791 */
1792 wmb();
1793
Sagar Dharia72007922011-12-13 21:14:26 -07001794 /* Register with framework before enabling frame, clock */
1795 ret = slim_add_numbered_controller(&dev->ctrl);
1796 if (ret) {
1797 dev_err(dev->dev, "error adding controller\n");
1798 goto err_ctrl_failed;
1799 }
1800
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001802 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1803 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1804 dev->base + FRM_CFG);
1805 /*
1806 * Make sure that framer wake-up and enabling writes go through
1807 * before any other component is enabled. Framer is responsible for
1808 * clocking the bus and enabling framer first will ensure that other
1809 * devices can report presence when they are enabled
1810 */
1811 mb();
1812
1813 /* Enable RX msg Q */
1814 if (dev->use_rx_msgqs)
1815 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1816 dev->base + MGR_CFG);
1817 else
1818 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1819 /*
1820 * Make sure that manager-enable is written through before interface
1821 * device is enabled
1822 */
1823 mb();
1824 writel_relaxed(1, dev->base + INTF_CFG);
1825 /*
1826 * Make sure that interface-enable is written through before enabling
1827 * ported generic device inside MSM manager
1828 */
1829 mb();
1830 writel_relaxed(1, dev->base + PGD_CFG);
1831 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1832 /*
1833 * Make sure that ported generic device is enabled and port-EE settings
1834 * are written through before finally enabling the component
1835 */
1836 mb();
1837
1838 writel_relaxed(1, dev->base + COMP_CFG);
1839 /*
1840 * Make sure that all writes have gone through before exiting this
1841 * function
1842 */
1843 mb();
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001844 pm_runtime_use_autosuspend(&pdev->dev);
1845 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
1846 pm_runtime_set_active(&pdev->dev);
1847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001848 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1849 return 0;
1850
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001851err_ctrl_failed:
1852 writel_relaxed(0, dev->base + COMP_CFG);
1853 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854err_sat_failed:
1855 free_irq(dev->irq, dev);
1856err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001857 clk_disable(dev->rclk);
1858 clk_put(dev->rclk);
1859err_clk_get_failed:
1860 msm_slim_sps_exit(dev);
1861err_sps_init_failed:
1862 iounmap(dev->bam.base);
1863err_ioremap_bam_failed:
1864 iounmap(dev->base);
1865err_ioremap_failed:
1866 kfree(dev);
1867err_get_res_failed:
1868 release_mem_region(bam_mem->start, resource_size(bam_mem));
1869err_get_res_bam_failed:
1870 release_mem_region(slim_mem->start, resource_size(slim_mem));
1871 return ret;
1872}
1873
/*
 * Remove the controller: unregister the satellite device, disable
 * runtime-PM, tear down the satellite context, IRQ, controller, clock,
 * SPS resources and mappings, then release the MMIO regions.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* cache the slew resource before dev is freed below */
	struct resource *slew_mem = dev->slew_mem;
	struct msm_slim_sat *sat = dev->satd;
	slim_remove_device(&sat->satcl);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	/*
	 * NOTE(review): rclk is clk_enable()d in probe but only clk_put()
	 * here -- a clk_disable() looks missing unless the clock-pause /
	 * runtime-suspend path already dropped the enable; confirm.
	 */
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	/* NOTE(review): the rx thread blocks on a completion; confirm
	 * kthread_stop() can actually wake it on this shutdown path */
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1907
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001908#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue an autosuspend request and return
 * -EAGAIN so the PM core does not suspend the device immediately.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
1915#endif
1916
1917/*
1918 * If PM_RUNTIME is not defined, these 2 functions become helper
1919 * functions to be called from system suspend/resume. So they are not
1920 * inside ifdef CONFIG_PM_RUNTIME
1921 */
Sagar Dharia45e77912012-01-10 09:55:18 -07001922#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001923static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001924{
1925 struct platform_device *pdev = to_platform_device(device);
1926 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001927 int ret;
1928 dev_dbg(device, "pm_runtime: suspending...\n");
1929 dev->state = MSM_CTRL_SLEEPING;
1930 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001931 if (ret) {
1932 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001933 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001934 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001935 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001936 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001937 return ret;
1938}
1939
1940static int msm_slim_runtime_resume(struct device *device)
1941{
1942 struct platform_device *pdev = to_platform_device(device);
1943 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1944 int ret = 0;
1945 dev_dbg(device, "pm_runtime: resuming...\n");
1946 if (dev->state == MSM_CTRL_ASLEEP)
1947 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001948 if (ret) {
1949 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001950 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001951 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001952 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07001953 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001954 return ret;
1955}
1956
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001957static int msm_slim_suspend(struct device *dev)
1958{
1959 int ret = 0;
1960 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
1961 dev_dbg(dev, "system suspend");
1962 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06001963 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001964 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06001965 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001966 * If the clock pause failed due to active channels, there is
1967 * a possibility that some audio stream is active during suspend
1968 * We dont want to return suspend failure in that case so that
1969 * display and relevant components can still go to suspend.
1970 * If there is some other error, then it should be passed-on
1971 * to system level suspend
1972 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06001973 ret = 0;
1974 }
1975 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976}
1977
/*
 * System-sleep resume callback.
 *
 * When runtime PM is managing the device (enabled and currently
 * suspended), this does nothing and lets the first runtime-resume wake
 * the bus.  Otherwise it wakes the controller now and schedules an
 * autosuspend so runtime PM can reclaim it later.
 */
static int msm_slim_resume(struct device *dev)
{
	int err;

	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	err = msm_slim_runtime_resume(dev);
	if (!err) {
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return err;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001994#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001995
/*
 * PM operations table: system sleep uses msm_slim_suspend/resume,
 * runtime PM uses the runtime_* callbacks.  Each SET_* macro expands to
 * nothing when its corresponding CONFIG option is disabled.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2007
/* Platform driver binding for the MSM SLIMBUS controller device. */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
2017
/*
 * Module init: registers the platform driver.  Hooked via
 * subsys_initcall below so the bus controller comes up before
 * device_initcall-level SLIMbus client drivers.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
2022subsys_initcall(msm_slim_init);
2023
/* Module exit: unregisters the platform driver. */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
2028module_exit(msm_slim_exit);
2029
2030MODULE_LICENSE("GPL v2");
2031MODULE_VERSION("0.1");
2032MODULE_DESCRIPTION("MSM Slimbus controller");
2033MODULE_ALIAS("platform:msm-slim");