blob: 937bfe7857cd916a28a21e648453f365117c0593 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia6b559e02011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <mach/sps.h>
26
/* Per spec.max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

/*
 * User-defined message codes exchanged with the satellite
 * (carried in DEST/SRC_REFERRED_USER message types)
 */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
/* Runtime-PM autosuspend delay (milliseconds) */
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM		32

/* Packs length/msg-type/msg-code/dest-type/address into word 0 of a msg */
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
#define SLIM_ROOT_FREQ	24576000

/* Depth of the local RX ring buffers (manager and satellite) */
#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
/* Default PGD port configuration bits written to PGD_PORT_CFGn */
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

/* Magic/version bytes carried in the master-capability message */
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCSS	0x20	/* historical "SUCCSS" spelling kept as-is */

/* Qualcomm enumeration-address fields identifying on-chip devices */
#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
85
/* Component registers */
enum comp_reg {
	COMP_CFG	= 0,
	COMP_TRUST_CFG	= 0x14,
};

/* Manager registers (message interface of the SLIMbus manager) */
enum mgr_reg {
	MGR_CFG		= 0x200,
	MGR_STATUS	= 0x204,
	MGR_RX_MSGQ_CFG	= 0x208,
	MGR_INT_EN	= 0x210,
	MGR_INT_STAT	= 0x214,
	MGR_INT_CLR	= 0x218,
	MGR_TX_MSG	= 0x230,
	MGR_RX_MSG	= 0x270,
	MGR_VE_STAT	= 0x300,
};

/* Bits for MGR_CFG */
enum msg_cfg {
	MGR_CFG_ENABLE		= 1,
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,
	MSGQ_TX_LOW	= 1,
	MSGQ_TX_HIGH	= 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG		= 0x400,
	FRM_STAT	= 0x404,
	FRM_INT_EN	= 0x410,
	FRM_INT_STAT	= 0x414,
	FRM_INT_CLR	= 0x418,
	FRM_WAKEUP	= 0x41C,
	FRM_CLKCTL_DONE	= 0x420,
	FRM_IE_STAT	= 0x430,
	FRM_VE_STAT	= 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG	= 0x600,
	INTF_STAT	= 0x604,
	INTF_INT_EN	= 0x610,
	INTF_INT_STAT	= 0x614,
	INTF_INT_CLR	= 0x618,
	INTF_IE_STAT	= 0x630,
	INTF_VE_STAT	= 0x640,
};

/*
 * Manager PGD registers. The *_EEn registers are banked per execution
 * environment (16-byte stride); the *_PORTn registers are banked per
 * port (32-byte stride) — see the (dev->ee * 16) / (pn * 32) offsets
 * at the call sites.
 */
enum pgd_reg {
	PGD_CFG			= 0x1000,
	PGD_STAT		= 0x1004,
	PGD_INT_EN		= 0x1010,
	PGD_INT_STAT		= 0x1014,
	PGD_INT_CLR		= 0x1018,
	PGD_OWN_EEn		= 0x1020,
	PGD_PORT_INT_EN_EEn	= 0x1030,
	PGD_PORT_INT_ST_EEn	= 0x1034,
	PGD_PORT_INT_CL_EEn	= 0x1038,
	PGD_PORT_CFGn		= 0x1080,
	PGD_PORT_STATn		= 0x1084,
	PGD_PORT_PARAMn		= 0x1088,
	PGD_PORT_BLKn		= 0x108C,
	PGD_PORT_TRANn		= 0x1090,
	PGD_PORT_MCHANn		= 0x1094,
	PGD_PORT_PSHPLLn	= 0x1098,
	PGD_PORT_PC_CFGn	= 0x1600,
	PGD_PORT_PC_VALn	= 0x1604,
	PGD_PORT_PC_VFR_TSn	= 0x1608,
	PGD_PORT_PC_VFR_STn	= 0x160C,
	PGD_PORT_PC_VFR_CLn	= 0x1610,
	PGD_IE_STAT		= 0x1700,
	PGD_VE_STAT		= 0x1710,
};

/* Resource-group ownership encodings for PGD_OWN_EEn */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};

/* Manager interrupt status/clear bits (MGR_INT_STAT / MGR_INT_CLR) */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};

/* Framer configuration field positions (FRM_CFG) */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};

/*
 * Controller power state:
 *  AWAKE    - fully operational
 *  SLEEPING - suspend sequence in progress (clock-pause entry)
 *  ASLEEP   - suspended; transactions are rejected with -EBUSY
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
194
/* SPS BAM pipe controller servicing the SLIMbus data ports */
struct msm_slim_sps_bam {
	u32 hdl;		/* handle returned by SPS on registration */
	void __iomem *base;	/* mapped BAM register space */
	int irq;
};

/* One SPS endpoint (pipe) backing a SLIMbus data port or msg queue */
struct msm_slim_endp {
	struct sps_pipe *sps;
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;	/* DMA-coherent backing buffer */
	struct completion *xcomp;	/* signalled on transfer completion */
	bool connected;
};

/* Per-controller driver state */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* embedded framework controller */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* mapped manager register space */
	struct resource *slew_mem;
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single in-flight TX message */
	/* RX ring: ISR producer (head/tail guarded by rx_lock) */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t rx_lock;
	int head;
	int tail;
	int irq;
	int err;			/* status of last TX (set by ISR) */
	int ee;				/* execution environment index */
	struct completion *wr_comp;	/* completed by ISR on TX done/NACK */
	struct msm_slim_sat *satd;
	struct msm_slim_endp pipes[7];	/* data-port pipes */
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;
	struct mutex tx_lock;		/* serializes TX transactions */
	u8 pgdla;			/* logical addr of ported generic dev */
	bool use_rx_msgqs;
	int pipe_b;			/* first HW port backed by pipes[0] */
	struct completion reconf;
	bool reconf_busy;
	bool chan_active;		/* data-channel runtime-PM vote held */
	enum msm_ctrl_state state;
};

/* Satellite (remote-EE) bookkeeping, serviced from a workqueue */
struct msm_slim_sat {
	struct slim_device satcl;
	struct msm_slim_ctrl *dev;
	struct workqueue_struct *wq;
	struct work_struct wd;
	/* SAT ring: ISR producer (shead/stail guarded by lock) */
	u8 sat_msgs[SAT_CONCUR_MSG][40];
	u16 *satch;			/* channel handles owned by satellite */
	u8 nsatch;
	bool sent_capability;		/* capability msg already sent once */
	int shead;
	int stail;
	spinlock_t lock;
};
257
258static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
259{
260 spin_lock(&dev->rx_lock);
261 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
262 spin_unlock(&dev->rx_lock);
263 dev_err(dev->dev, "RX QUEUE full!");
264 return -EXFULL;
265 }
266 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
267 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
268 spin_unlock(&dev->rx_lock);
269 return 0;
270}
271
272static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
273{
274 unsigned long flags;
275 spin_lock_irqsave(&dev->rx_lock, flags);
276 if (dev->tail == dev->head) {
277 spin_unlock_irqrestore(&dev->rx_lock, flags);
278 return -ENODATA;
279 }
280 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
281 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
282 spin_unlock_irqrestore(&dev->rx_lock, flags);
283 return 0;
284}
285
286static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
287{
288 struct msm_slim_ctrl *dev = sat->dev;
289 spin_lock(&sat->lock);
290 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
291 spin_unlock(&sat->lock);
292 dev_err(dev->dev, "SAT QUEUE full!");
293 return -EXFULL;
294 }
295 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
296 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
297 spin_unlock(&sat->lock);
298 return 0;
299}
300
301static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
302{
303 unsigned long flags;
304 spin_lock_irqsave(&sat->lock, flags);
305 if (sat->stail == sat->shead) {
306 spin_unlock_irqrestore(&sat->lock, flags);
307 return -ENODATA;
308 }
309 memcpy(buf, sat->sat_msgs[sat->shead], 40);
310 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
311 spin_unlock_irqrestore(&sat->lock, flags);
312 return 0;
313}
314
315static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
316{
317 e_addr[0] = (buffer[1] >> 24) & 0xff;
318 e_addr[1] = (buffer[1] >> 16) & 0xff;
319 e_addr[2] = (buffer[1] >> 8) & 0xff;
320 e_addr[3] = buffer[1] & 0xff;
321 e_addr[4] = (buffer[0] >> 24) & 0xff;
322 e_addr[5] = (buffer[0] >> 16) & 0xff;
323}
324
325static bool msm_is_sat_dev(u8 *e_addr)
326{
327 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
328 e_addr[2] != QC_CHIPID_SL &&
329 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
330 return true;
331 return false;
332}
333
/*
 * Main SLIMbus manager interrupt handler.
 * Handles, in order: TX completion (sent or NACKed), RX message arrival
 * (dispatched by message type/code to the satellite workqueue, the RX
 * ring, or logged), reconfiguration-done, and per-port status events.
 * Every interrupt-clear write is followed by mb() so the clear reaches
 * the hardware before any dependent action (completion, queue_work).
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: record the error for the waiting sender */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* Word 0 carries length (bits 0-4), MT (5-7), MC (8-15) */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-referred messages go to the satellite worker */
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				/* Non-satellite device: handled by rxwq */
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log the reported information element and bitmasks */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
						mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		/* Drop the data-channel runtime-PM vote if no slots in use */
		if (dev->ctrl.sched.usedslots == 0 &&
			dev->state != MSM_CTRL_SLEEPING) {
			dev->chan_active = false;
			pm_runtime_put(dev->dev);
		}
		complete(&dev->reconf);
	}
	/* Per-port events: disconnect / overflow / underflow */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
					(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
504
505static int
506msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
507{
508 int ret;
509 struct sps_pipe *endpoint;
510 struct sps_connect *config = &ep->config;
511
512 /* Allocate the endpoint */
513 endpoint = sps_alloc_endpoint();
514 if (!endpoint) {
515 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
516 return -ENOMEM;
517 }
518
519 /* Get default connection configuration for an endpoint */
520 ret = sps_get_config(endpoint, config);
521 if (ret) {
522 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
523 goto sps_config_failed;
524 }
525
526 ep->sps = endpoint;
527 return 0;
528
529sps_config_failed:
530 sps_free_endpoint(endpoint);
531 return ret;
532}
533
/* Release the SPS pipe backing @ep and clear the stale handle. */
static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
540
541static int msm_slim_sps_mem_alloc(
542 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
543{
544 dma_addr_t phys;
545
546 mem->size = len;
547 mem->min_size = 0;
548 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
549
550 if (!mem->base) {
551 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
552 return -ENOMEM;
553 }
554
555 mem->phys_base = phys;
556 memset(mem->base, 0x00, mem->size);
557 return 0;
558}
559
560static void
561msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
562{
563 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
564 mem->size = 0;
565 mem->base = NULL;
566 mem->phys_base = 0;
567}
568
/*
 * Program hardware port @pn with the default configuration (watermark,
 * alignment, packing, enable) and unmask its interrupt for this EE.
 * Port registers are banked at 32-byte stride; EE registers at 16.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* Read-modify-write: preserve interrupt enables of other ports */
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}
582
/*
 * Connect SPS pipe @pn to its hardware port: configure the BAM
 * connection direction from the port's flow (SLIM_SRC = memory->BAM,
 * else BAM->memory), reading the BAM pipe index from the port status
 * register, then sps_connect() and enable the hardware port.
 * Returns 0 on success or the SPS error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe only needs its options refreshed */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* BAM pipe index for this port lives in STATn bits [11:4] */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
634
/*
 * Return the (single) TX message buffer. @len is currently unused:
 * transactions are serialized under tx_lock, so one buffer suffices.
 */
static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}
644
645static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
646{
647 int i;
648 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
649 for (i = 0; i < (len + 3) >> 2; i++) {
650 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
651 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
652 }
653 /* Guarantee that message is sent before returning */
654 mb();
655 return 0;
656}
657
/*
 * Send one SLIMbus transaction and wait (up to HZ) for its TX-done or
 * NACK interrupt. Assembles the header word, optional TID/element-code
 * bytes and payload into the TX buffer, handles the special cases of
 * port connect/disconnect addressed to the ported generic device, and
 * manages two runtime-PM votes: one per-message (dropped here unless a
 * response is expected, then dropped in msm_slim_rxwq) and one for
 * active data channels (dropped in the ISR when reconfiguration leaves
 * no used slots). Returns 0/dev->err, -EBUSY if suspended, -ETIMEDOUT
 * on timeout, or -EPROTONOSUPPORT for enumeration-address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	u8 la = txn->la;
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (dev->state != MSM_CTRL_SLEEPING)
		pm_runtime_get_sync(dev->dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		pm_runtime_put(dev->dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Wait out any reconfiguration still in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			dev->chan_active = true;
			if (dev->state != MSM_CTRL_SLEEPING)
				pm_runtime_get(dev->dev);
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (dev->state != MSM_CTRL_SLEEPING)
			pm_runtime_put(dev->dev);
		return -EPROTONOSUPPORT;
	}
	/* Port connect/disconnect with la 0xFF targets the PGD device */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
				0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
				1, la);
	/* Payload starts after a 1-byte (logical) or 2-byte address */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Value/information-element transactions carry a 16-bit EC */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/* Tear down the SPS pipe locally; no bus message */
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (dev->state != MSM_CTRL_SLEEPING)
				pm_runtime_put(dev->dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (dev->state != MSM_CTRL_SLEEPING)
				pm_runtime_put(dev->dev);
			return dev->err;
		}
		/* Translate port number to hardware port (pipe_b offset) */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	/* During suspend, also wait for the reconfiguration to finish */
	if (dev->state == MSM_CTRL_SLEEPING &&
		txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW &&
		txn->mt == SLIM_MSG_MT_CORE && timeout) {
		timeout = wait_for_completion_timeout(&dev->reconf, HZ);
		if (timeout)
			dev->reconf_busy = false;
	}
	mutex_unlock(&dev->tx_lock);
	/*
	 * If a response is expected (rbuf set), the messaging vote is
	 * released when the response is handled in msm_slim_rxwq.
	 */
	if (!txn->rbuf && dev->state != MSM_CTRL_SLEEPING)
		pm_runtime_put(dev->dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
					txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
790
/*
 * Assign logical address @laddr to the device with enumeration address
 * @ea (@elen bytes). Builds the 9-byte ASSIGN_LOGICAL_ADDRESS message
 * directly and waits up to HZ for the TX-done interrupt.
 * Returns dev->err (0 on ACK) or -ETIMEDOUT.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	/* Destination is the enumeration address, packed into words 0-1 */
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
813
/*
 * Bring the bus out of clock pause: re-enable the controller IRQ and
 * root clock, then toggle the framer wakeup register. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
834
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700835static int msm_config_port(struct slim_controller *ctrl, u8 pn)
836{
837 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
838 struct msm_slim_endp *endpoint;
839 int ret = 0;
840 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
841 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
842 return -EPROTONOSUPPORT;
843 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
844 return -ENODEV;
845
846 endpoint = &dev->pipes[pn];
847 ret = msm_slim_init_endpoint(dev, endpoint);
848 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
849 return ret;
850}
851
852static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
853 u8 pn, u8 **done_buf, u32 *done_len)
854{
855 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
856 struct sps_iovec sio;
857 int ret;
858 if (done_len)
859 *done_len = 0;
860 if (done_buf)
861 *done_buf = NULL;
862 if (!dev->pipes[pn].connected)
863 return SLIM_P_DISCONNECT;
864 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
865 if (!ret) {
866 if (done_len)
867 *done_len = sio.size;
868 if (done_buf)
869 *done_buf = (u8 *)sio.addr;
870 }
871 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
872 return SLIM_P_INPROGRESS;
873}
874
875static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
876 u32 len, struct completion *comp)
877{
878 struct sps_register_event sreg;
879 int ret;
880 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600881 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700882 return -ENODEV;
883
884
885 ctrl->ports[pn].xcomp = comp;
886 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
887 sreg.mode = SPS_TRIGGER_WAIT;
888 sreg.xfer_done = comp;
889 sreg.callback = NULL;
890 sreg.user = &ctrl->ports[pn];
891 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
892 if (ret) {
893 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
894 return ret;
895 }
896 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
897 SPS_IOVEC_FLAG_INT);
898 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
899
900 return ret;
901}
902
/*
 * Handle a satellite channel request (@mc is one of the SLIM_USR_MC
 * channel message codes). For CHAN_CTRL, applies the requested channel
 * operation; otherwise unpacks the channel properties from the raw
 * message bytes, defines the channel(s) — as a group when more than one
 * channel is listed — and activates them for DEF_ACT_CHAN.
 * Returns the framework result code.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* Channel numbers start at byte 8 of the message */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift 5 yields 0/2/4/6, not
		 * the 0-3 a 2-bit field suggests — confirm against the
		 * message format (>> 6 would look more consistent).
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		/* rate multiplier = coefficient * 2^exponent */
		prop.ratem = cc * (1 << exp);
		/* i > 9 after the loop means more than one channel listed */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
952
/*
 * Process one message from the controller RX ring (filled by the ISR).
 * REPORT_PRESENT: assign a logical address, remembering it in
 * dev->pgdla for the Qualcomm ported generic device. REPLY_*: forward
 * the response to the framework; the pm_runtime_put here releases the
 * messaging vote taken in msm_xfer_msg for transactions expecting a
 * response (rbuf set). REPORT_INFORMATION and unknown messages are
 * logged.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address is stored in reverse order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];
			/* Keep the bus powered while assigning the address */
			pm_runtime_get_sync(dev->dev);

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			pm_runtime_put(dev->dev);

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			/* Drops the vote held by msm_xfer_msg (rbuf case) */
			pm_runtime_put(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1005
/*
 * slim_sat_rxprocess() - workqueue handler servicing satellite-protocol
 * messages queued by the RX thread.
 *
 * Runs in process context on sat->wq. Drains the satellite queue until
 * msm_sat_dequeue() reports -ENODATA. A runtime-PM vote is held while each
 * message is processed; for REPORT_PRESENT the vote is intentionally kept
 * until the satellite acks the manager-capability message (see the
 * pm_runtime_put() calls below and in the caller's ack path).
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];	/* per spec, max 40 bytes per received message */

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* gen_ack: send a GENERIC_ACK back to the satellite after
		 * the switch below?
		 */
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address is byte-reversed in the buffer */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			pm_runtime_get_sync(dev->dev);
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT)
			pm_runtime_get_sync(dev->dev);
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				pm_runtime_put(dev->dev);
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* look up logical address for e-addr in buf[4..9];
			 * reply with zeroed address on failure
			 */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			/* buf[3] echoed back (presumably the TID -- matches
			 * the TID position used by RECONFIG_NOW)
			 */
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* TID sits at a different offset for CHAN_CTRL */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* translate satellite request into a core message
			 * addressed to the device in buf[3]
			 */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* falls through into default, which only breaks */
		default:
			break;
		}
		if (!gen_ack) {
			/* REPORT_PRESENT keeps its runtime-PM vote until the
			 * satellite acks the capability message
			 */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT)
				pm_runtime_put(dev->dev);
			continue;
		}
		/* send generic acknowledgement back to the satellite */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		pm_runtime_put(dev->dev);
	}
}
1171
1172static void
1173msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1174{
1175 u32 *buf = ev->data.transfer.user;
1176 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1177
1178 /*
1179 * Note the virtual address needs to be offset by the same index
1180 * as the physical address or just pass in the actual virtual address
1181 * if the sps_mem_buffer is not needed. Note that if completion is
1182 * used, the virtual address won't be available and will need to be
1183 * calculated based on the offset of the physical address
1184 */
1185 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1186
1187 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1188
1189 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1190 iovec->addr, iovec->size, iovec->flags);
1191
1192 } else {
1193 dev_err(dev->dev, "%s: unknown event %d\n",
1194 __func__, ev->event_id);
1195 }
1196}
1197
1198static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1199{
1200 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1201 msm_slim_rx_msgq_event(dev, notify);
1202}
1203
1204/* Queue up Rx message buffer */
1205static inline int
1206msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1207{
1208 int ret;
1209 u32 flags = SPS_IOVEC_FLAG_INT;
1210 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1211 struct sps_mem_buffer *mem = &endpoint->buf;
1212 struct sps_pipe *pipe = endpoint->sps;
1213
1214 /* Rx message queue buffers are 4 bytes in length */
1215 u8 *virt_addr = mem->base + (4 * ix);
1216 u32 phys_addr = mem->phys_base + (4 * ix);
1217
1218 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1219
1220 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1221 if (ret)
1222 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1223
1224 return ret;
1225}
1226
1227static inline int
1228msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1229{
1230 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1231 struct sps_mem_buffer *mem = &endpoint->buf;
1232 struct sps_pipe *pipe = endpoint->sps;
1233 struct sps_iovec iovec;
1234 int index;
1235 int ret;
1236
1237 ret = sps_get_iovec(pipe, &iovec);
1238 if (ret) {
1239 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1240 goto err_exit;
1241 }
1242
1243 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1244 iovec.addr, iovec.size, iovec.flags);
1245 BUG_ON(iovec.addr < mem->phys_base);
1246 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1247
1248 /* Calculate buffer index */
1249 index = (iovec.addr - mem->phys_base) / 4;
1250 *(data + offset) = *((u32 *)mem->base + index);
1251
1252 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1253
1254 /* Add buffer back to the queue */
1255 (void)msm_slim_post_rx_msgq(dev, index);
1256
1257err_exit:
1258 return ret;
1259}
1260
/*
 * msm_slim_rx_msgq_thread() - kthread that reassembles RX messages from
 * the 4-byte BAM message-queue descriptors.
 *
 * Woken via dev->rx_msgq_notify once per completed descriptor. State that
 * spans loop iterations: 'index' counts 4-byte words collected for the
 * current message, 'msg_len'/'mc'/'mt' are parsed from the first word, and
 * 'sat' marks the message for satellite dispatch. When a full message has
 * been collected it is handed either to the satellite workqueue or to the
 * generic RX path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];		/* 40 bytes: max message size, in words */
	int index = 0;		/* next word slot for the current message */
	u8 msg_len = 0;		/* byte length from the message header */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		/* pull exactly one 4-byte word into buffer[index] */
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* first word carries length, message type and code */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			/* message complete: reset and dispatch */
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1329
1330static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1331{
1332 int i, ret;
1333 u32 pipe_offset;
1334 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1335 struct sps_connect *config = &endpoint->config;
1336 struct sps_mem_buffer *descr = &config->desc;
1337 struct sps_mem_buffer *mem = &endpoint->buf;
1338 struct completion *notify = &dev->rx_msgq_notify;
1339
1340 struct sps_register_event sps_error_event; /* SPS_ERROR */
1341 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1342
1343 /* Allocate the endpoint */
1344 ret = msm_slim_init_endpoint(dev, endpoint);
1345 if (ret) {
1346 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1347 goto sps_init_endpoint_failed;
1348 }
1349
1350 /* Get the pipe indices for the message queues */
1351 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1352 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1353
1354 config->mode = SPS_MODE_SRC;
1355 config->source = dev->bam.hdl;
1356 config->destination = SPS_DEV_HANDLE_MEM;
1357 config->src_pipe_index = pipe_offset;
1358 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1359 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1360
1361 /* Allocate memory for the FIFO descriptors */
1362 ret = msm_slim_sps_mem_alloc(dev, descr,
1363 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1364 if (ret) {
1365 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1366 goto alloc_descr_failed;
1367 }
1368
1369 ret = sps_connect(endpoint->sps, config);
1370 if (ret) {
1371 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1372 goto sps_connect_failed;
1373 }
1374
1375 /* Register completion for DESC_DONE */
1376 init_completion(notify);
1377 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1378
1379 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1380 sps_descr_event.options = SPS_O_DESC_DONE;
1381 sps_descr_event.user = (void *)dev;
1382 sps_descr_event.xfer_done = notify;
1383
1384 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1385 if (ret) {
1386 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1387 goto sps_reg_event_failed;
1388 }
1389
1390 /* Register callback for errors */
1391 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1392 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1393 sps_error_event.options = SPS_O_ERROR;
1394 sps_error_event.user = (void *)dev;
1395 sps_error_event.callback = msm_slim_rx_msgq_cb;
1396
1397 ret = sps_register_event(endpoint->sps, &sps_error_event);
1398 if (ret) {
1399 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1400 goto sps_reg_event_failed;
1401 }
1402
1403 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1404 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1405 if (ret) {
1406 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1407 goto alloc_buffer_failed;
1408 }
1409
1410 /*
1411 * Call transfer_one for each 4-byte buffer
1412 * Use (buf->size/4) - 1 for the number of buffer to post
1413 */
1414
1415 /* Setup the transfer */
1416 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1417 ret = msm_slim_post_rx_msgq(dev, i);
1418 if (ret) {
1419 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1420 goto sps_transfer_failed;
1421 }
1422 }
1423
1424 /* Fire up the Rx message queue thread */
1425 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1426 MSM_SLIM_NAME "_rx_msgq_thread");
1427 if (!dev->rx_msgq_thread) {
1428 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1429 ret = -EIO;
1430 } else
1431 return 0;
1432
1433sps_transfer_failed:
1434 msm_slim_sps_mem_free(dev, mem);
1435alloc_buffer_failed:
1436 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1437 sps_register_event(endpoint->sps, &sps_error_event);
1438sps_reg_event_failed:
1439 sps_disconnect(endpoint->sps);
1440sps_connect_failed:
1441 msm_slim_sps_mem_free(dev, descr);
1442alloc_descr_failed:
1443 msm_slim_free_endpoint(endpoint);
1444sps_init_endpoint_failed:
1445 return ret;
1446}
1447
1448/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1449static int __devinit
1450msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1451{
1452 int i, ret;
1453 u32 bam_handle;
1454 struct sps_bam_props bam_props = {0};
1455
1456 static struct sps_bam_sec_config_props sec_props = {
1457 .ees = {
1458 [0] = { /* LPASS */
1459 .vmid = 0,
1460 .pipe_mask = 0xFFFF98,
1461 },
1462 [1] = { /* Krait Apps */
1463 .vmid = 1,
1464 .pipe_mask = 0x3F000007,
1465 },
1466 [2] = { /* Modem */
1467 .vmid = 2,
1468 .pipe_mask = 0x00000060,
1469 },
1470 },
1471 };
1472
1473 bam_props.ee = dev->ee;
1474 bam_props.virt_addr = dev->bam.base;
1475 bam_props.phys_addr = bam_mem->start;
1476 bam_props.irq = dev->bam.irq;
1477 bam_props.manage = SPS_BAM_MGR_LOCAL;
1478 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1479
1480 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1481 bam_props.p_sec_config_props = &sec_props;
1482
1483 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1484 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1485
1486 /* First 7 bits are for message Qs */
1487 for (i = 7; i < 32; i++) {
1488 /* Check what pipes are owned by Apps. */
1489 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1490 break;
1491 }
1492 dev->pipe_b = i - 7;
1493
1494 /* Register the BAM device with the SPS driver */
1495 ret = sps_register_bam_device(&bam_props, &bam_handle);
1496 if (ret) {
1497 dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
1498 return ret;
1499 }
1500 dev->bam.hdl = bam_handle;
1501 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1502
1503 ret = msm_slim_init_rx_msgq(dev);
1504 if (ret) {
1505 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
1506 goto rx_msgq_init_failed;
1507 }
1508
1509 return 0;
1510rx_msgq_init_failed:
1511 sps_deregister_bam_device(bam_handle);
1512 dev->bam.hdl = 0L;
1513 return ret;
1514}
1515
1516static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1517{
1518 if (dev->use_rx_msgqs) {
1519 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1520 struct sps_connect *config = &endpoint->config;
1521 struct sps_mem_buffer *descr = &config->desc;
1522 struct sps_mem_buffer *mem = &endpoint->buf;
1523 struct sps_register_event sps_event;
1524 memset(&sps_event, 0x00, sizeof(sps_event));
1525 msm_slim_sps_mem_free(dev, mem);
1526 sps_register_event(endpoint->sps, &sps_event);
1527 sps_disconnect(endpoint->sps);
1528 msm_slim_sps_mem_free(dev, descr);
1529 msm_slim_free_endpoint(endpoint);
1530 }
1531 sps_deregister_bam_device(dev->bam.hdl);
1532}
1533
Sagar Dhariacc969452011-09-19 10:34:30 -06001534static void msm_slim_prg_slew(struct platform_device *pdev,
1535 struct msm_slim_ctrl *dev)
1536{
1537 struct resource *slew_io;
1538 void __iomem *slew_reg;
1539 /* SLEW RATE register for this slimbus */
1540 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1541 "slimbus_slew_reg");
1542 if (!dev->slew_mem) {
1543 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1544 return;
1545 }
1546 slew_io = request_mem_region(dev->slew_mem->start,
1547 resource_size(dev->slew_mem), pdev->name);
1548 if (!slew_io) {
1549 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1550 dev->slew_mem = NULL;
1551 return;
1552 }
1553
1554 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1555 if (!slew_reg) {
1556 dev_dbg(dev->dev, "slew register mapping failed");
1557 release_mem_region(dev->slew_mem->start,
1558 resource_size(dev->slew_mem));
1559 dev->slew_mem = NULL;
1560 return;
1561 }
1562 writel_relaxed(1, slew_reg);
1563 /* Make sure slimbus-slew rate enabling goes through */
1564 wmb();
1565 iounmap(slew_reg);
1566}
1567
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568static int __devinit msm_slim_probe(struct platform_device *pdev)
1569{
1570 struct msm_slim_ctrl *dev;
1571 int ret;
1572 struct resource *bam_mem, *bam_io;
1573 struct resource *slim_mem, *slim_io;
1574 struct resource *irq, *bam_irq;
1575 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1576 "slimbus_physical");
1577 if (!slim_mem) {
1578 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1579 return -ENODEV;
1580 }
1581 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1582 pdev->name);
1583 if (!slim_io) {
1584 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1585 return -EBUSY;
1586 }
1587
1588 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1589 "slimbus_bam_physical");
1590 if (!bam_mem) {
1591 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1592 ret = -ENODEV;
1593 goto err_get_res_bam_failed;
1594 }
1595 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1596 pdev->name);
1597 if (!bam_io) {
1598 release_mem_region(slim_mem->start, resource_size(slim_mem));
1599 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1600 ret = -EBUSY;
1601 goto err_get_res_bam_failed;
1602 }
1603 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1604 "slimbus_irq");
1605 if (!irq) {
1606 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1607 ret = -ENODEV;
1608 goto err_get_res_failed;
1609 }
1610 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1611 "slimbus_bam_irq");
1612 if (!bam_irq) {
1613 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1614 ret = -ENODEV;
1615 goto err_get_res_failed;
1616 }
1617
1618 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1619 if (!dev) {
1620 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1621 ret = -ENOMEM;
1622 goto err_get_res_failed;
1623 }
1624 dev->dev = &pdev->dev;
1625 platform_set_drvdata(pdev, dev);
1626 slim_set_ctrldata(&dev->ctrl, dev);
1627 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1628 if (!dev->base) {
1629 dev_err(&pdev->dev, "IOremap failed\n");
1630 ret = -ENOMEM;
1631 goto err_ioremap_failed;
1632 }
1633 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1634 if (!dev->bam.base) {
1635 dev_err(&pdev->dev, "BAM IOremap failed\n");
1636 ret = -ENOMEM;
1637 goto err_ioremap_bam_failed;
1638 }
1639 dev->ctrl.nr = pdev->id;
1640 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1641 dev->ctrl.nports = MSM_SLIM_NPORTS;
1642 dev->ctrl.set_laddr = msm_set_laddr;
1643 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001644 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645 dev->ctrl.config_port = msm_config_port;
1646 dev->ctrl.port_xfer = msm_slim_port_xfer;
1647 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1648 /* Reserve some messaging BW for satellite-apps driver communication */
1649 dev->ctrl.sched.pending_msgsl = 30;
1650
1651 init_completion(&dev->reconf);
1652 mutex_init(&dev->tx_lock);
1653 spin_lock_init(&dev->rx_lock);
1654 dev->ee = 1;
1655 dev->use_rx_msgqs = 1;
1656 dev->irq = irq->start;
1657 dev->bam.irq = bam_irq->start;
1658
1659 ret = msm_slim_sps_init(dev, bam_mem);
1660 if (ret != 0) {
1661 dev_err(dev->dev, "error SPS init\n");
1662 goto err_sps_init_failed;
1663 }
1664
1665
1666 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
Sagar Dhariacc969452011-09-19 10:34:30 -06001667 if (!dev->rclk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668 dev_err(dev->dev, "slimbus clock not found");
1669 goto err_clk_get_failed;
1670 }
1671 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1672 dev->framer.superfreq =
1673 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1674 dev->ctrl.a_framer = &dev->framer;
1675 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
1676 ret = slim_add_numbered_controller(&dev->ctrl);
1677 if (ret) {
1678 dev_err(dev->dev, "error adding controller\n");
1679 goto err_ctrl_failed;
1680 }
1681
1682 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1683 "msm_slim_irq", dev);
1684 if (ret) {
1685 dev_err(&pdev->dev, "request IRQ failed\n");
1686 goto err_request_irq_failed;
1687 }
1688
1689 dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1690 if (!dev->satd) {
1691 ret = -ENOMEM;
1692 goto err_sat_failed;
1693 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001694
1695 msm_slim_prg_slew(pdev, dev);
1696 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1697 clk_enable(dev->rclk);
1698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001699 dev->satd->dev = dev;
1700 dev->satd->satcl.name = "msm_sat_dev";
1701 spin_lock_init(&dev->satd->lock);
1702 INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
1703 dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
1704 /* Component register initialization */
1705 writel_relaxed(1, dev->base + COMP_CFG);
1706 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1707 dev->base + COMP_TRUST_CFG);
1708
1709 /*
1710 * Manager register initialization
1711 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1712 */
1713 if (dev->use_rx_msgqs)
1714 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1715 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1716 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1717 else
1718 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1719 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1720 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1721 writel_relaxed(1, dev->base + MGR_CFG);
1722 /*
1723 * Framer registers are beyond 1K memory region after Manager and/or
1724 * component registers. Make sure those writes are ordered
1725 * before framer register writes
1726 */
1727 wmb();
1728
1729 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001730 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1731 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1732 dev->base + FRM_CFG);
1733 /*
1734 * Make sure that framer wake-up and enabling writes go through
1735 * before any other component is enabled. Framer is responsible for
1736 * clocking the bus and enabling framer first will ensure that other
1737 * devices can report presence when they are enabled
1738 */
1739 mb();
1740
1741 /* Enable RX msg Q */
1742 if (dev->use_rx_msgqs)
1743 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1744 dev->base + MGR_CFG);
1745 else
1746 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1747 /*
1748 * Make sure that manager-enable is written through before interface
1749 * device is enabled
1750 */
1751 mb();
1752 writel_relaxed(1, dev->base + INTF_CFG);
1753 /*
1754 * Make sure that interface-enable is written through before enabling
1755 * ported generic device inside MSM manager
1756 */
1757 mb();
1758 writel_relaxed(1, dev->base + PGD_CFG);
1759 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1760 /*
1761 * Make sure that ported generic device is enabled and port-EE settings
1762 * are written through before finally enabling the component
1763 */
1764 mb();
1765
1766 writel_relaxed(1, dev->base + COMP_CFG);
1767 /*
1768 * Make sure that all writes have gone through before exiting this
1769 * function
1770 */
1771 mb();
Sagar Dharia6b559e02011-08-03 17:01:31 -06001772 pm_runtime_use_autosuspend(&pdev->dev);
1773 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
1774 pm_runtime_set_active(&pdev->dev);
1775 pm_runtime_enable(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001776 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1777 return 0;
1778
1779err_sat_failed:
1780 free_irq(dev->irq, dev);
1781err_request_irq_failed:
1782 slim_del_controller(&dev->ctrl);
1783err_ctrl_failed:
1784 clk_disable(dev->rclk);
1785 clk_put(dev->rclk);
1786err_clk_get_failed:
1787 msm_slim_sps_exit(dev);
1788err_sps_init_failed:
1789 iounmap(dev->bam.base);
1790err_ioremap_bam_failed:
1791 iounmap(dev->base);
1792err_ioremap_failed:
1793 kfree(dev);
1794err_get_res_failed:
1795 release_mem_region(bam_mem->start, resource_size(bam_mem));
1796err_get_res_bam_failed:
1797 release_mem_region(slim_mem->start, resource_size(slim_mem));
1798 return ret;
1799}
1800
/*
 * msm_slim_remove() - unwind everything msm_slim_probe() set up.
 *
 * Order matters: the satellite device and runtime PM are torn down first,
 * then IRQ/controller/clock/SPS, and the MMIO regions are released last,
 * after 'dev' has been freed (slew_mem is cached beforehand for the same
 * reason).
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* cache before kfree(dev) below */
	struct resource *slew_mem = dev->slew_mem;
	struct msm_slim_sat *sat = dev->satd;
	slim_remove_device(&sat->satcl);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1834
Sagar Dharia6b559e02011-08-03 17:01:31 -06001835#ifdef CONFIG_PM_RUNTIME
1836static int msm_slim_runtime_idle(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837{
1838 struct platform_device *pdev = to_platform_device(device);
1839 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06001840 dev_dbg(device, "pm_runtime: idle...\n");
1841 pm_runtime_mark_last_busy(dev->dev);
1842 pm_request_autosuspend(device);
1843 return -EAGAIN;
1844}
1845#endif
1846
1847/*
1848 * If PM_RUNTIME is not defined, these 2 functions become helper
1849 * functions to be called from system suspend/resume. So they are not
1850 * inside ifdef CONFIG_PM_RUNTIME
1851 */
1852static int msm_slim_runtime_suspend(struct device *device)
1853{
1854 struct platform_device *pdev = to_platform_device(device);
1855 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1856 int ret;
1857 dev_dbg(device, "pm_runtime: suspending...\n");
1858 dev->state = MSM_CTRL_SLEEPING;
1859 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dharia144e5e02011-08-08 17:30:11 -06001860 /* Make sure clock pause goes through */
Sagar Dharia144e5e02011-08-08 17:30:11 -06001861 if (!ret) {
1862 clk_disable(dev->rclk);
1863 disable_irq(dev->irq);
Sagar Dharia6b559e02011-08-03 17:01:31 -06001864 dev->state = MSM_CTRL_ASLEEP;
1865 } else
1866 dev->state = MSM_CTRL_AWAKE;
1867 return ret;
1868}
1869
1870static int msm_slim_runtime_resume(struct device *device)
1871{
1872 struct platform_device *pdev = to_platform_device(device);
1873 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1874 int ret = 0;
1875 dev_dbg(device, "pm_runtime: resuming...\n");
1876 mutex_lock(&dev->tx_lock);
1877 if (dev->state == MSM_CTRL_ASLEEP) {
1878 mutex_unlock(&dev->tx_lock);
1879 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
1880 if (!ret)
1881 dev->state = MSM_CTRL_AWAKE;
1882 return ret;
1883 }
1884 mutex_unlock(&dev->tx_lock);
1885 return ret;
1886}
1887
1888#ifdef CONFIG_PM_SLEEP
1889static int msm_slim_suspend(struct device *dev)
1890{
1891 int ret = 0;
1892 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
1893 dev_dbg(dev, "system suspend");
1894 ret = msm_slim_runtime_suspend(dev);
1895 }
1896 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06001897 /*
Sagar Dharia6b559e02011-08-03 17:01:31 -06001898 * If the clock pause failed due to active channels, there is
1899 * a possibility that some audio stream is active during suspend
1900 * We dont want to return suspend failure in that case so that
1901 * display and relevant components can still go to suspend.
1902 * If there is some other error, then it should be passed-on
1903 * to system level suspend
1904 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06001905 ret = 0;
1906 }
1907 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001908}
1909
/* System resume: defer entirely to runtime PM when it owns the device */
static int msm_slim_resume(struct device *dev)
{
	/* If runtime_pm is enabled, this resume shouldn't do anything */
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	return msm_slim_runtime_resume(dev);
}
Sagar Dharia6b559e02011-08-03 17:01:31 -06001919#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001920
/* Dispatch table tying system sleep and runtime-PM entry points together */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
1932
/* Platform-driver registration record for the MSM SLIMbus controller */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
1942
/* Module entry: register the platform driver (early, via subsys_initcall) */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
1948
/* Module exit: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
1954
1955MODULE_LICENSE("GPL v2");
1956MODULE_VERSION("0.1");
1957MODULE_DESCRIPTION("MSM Slimbus controller");
1958MODULE_ALIAS("platform:msm-slim");