blob: 3678b30cdd197ac812f5bec49260a479464d8031 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
24#include <mach/sps.h>
25
26/* Per spec.max 40 bytes per received message */
27#define SLIM_RX_MSGQ_BUF_LEN 40
28
29#define SLIM_USR_MC_GENERIC_ACK 0x25
30#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
31#define SLIM_USR_MC_REPORT_SATELLITE 0x1
32#define SLIM_USR_MC_ADDR_QUERY 0xD
33#define SLIM_USR_MC_ADDR_REPLY 0xE
34#define SLIM_USR_MC_DEFINE_CHAN 0x20
35#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
36#define SLIM_USR_MC_CHAN_CTRL 0x23
37#define SLIM_USR_MC_RECONFIG_NOW 0x24
38#define SLIM_USR_MC_REQ_BW 0x28
39#define SLIM_USR_MC_CONNECT_SRC 0x2C
40#define SLIM_USR_MC_CONNECT_SINK 0x2D
41#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
42
43/* MSM Slimbus peripheral settings */
44#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
45#define MSM_SLIM_NCHANS 32
46#define MSM_SLIM_NPORTS 24
47
48/*
49 * Need enough descriptors to receive present messages from slaves
50 * if received simultaneously. Present message needs 3 descriptors
51 * and this size will ensure around 10 simultaneous reports.
52 */
53#define MSM_SLIM_DESC_NUM 32
54
55#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
56 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
57
58#define MSM_SLIM_NAME "msm_slim_ctrl"
59#define SLIM_ROOT_FREQ 24576000
60
61#define MSM_CONCUR_MSG 8
62#define SAT_CONCUR_MSG 8
63#define DEF_WATERMARK (8 << 1)
64#define DEF_ALIGN 0
65#define DEF_PACK (1 << 6)
66#define ENABLE_PORT 1
67
68#define DEF_BLKSZ 0
69#define DEF_TRANSZ 0
70
71#define SAT_MAGIC_LSB 0xD9
72#define SAT_MAGIC_MSB 0xC5
73#define SAT_MSG_VER 0x1
74#define SAT_MSG_PROT 0x1
75#define MSM_SAT_SUCCSS 0x20
76
77#define QC_MFGID_LSB 0x2
78#define QC_MFGID_MSB 0x17
79#define QC_CHIPID_SL 0x10
80#define QC_DEVID_SAT1 0x3
81#define QC_DEVID_SAT2 0x4
82#define QC_DEVID_PGD 0x5
83
/* Component registers (offsets from the controller's register base) */
enum comp_reg {
	COMP_CFG = 0,
	COMP_TRUST_CFG = 0x14,
};

/* Manager registers */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,	/* read in the ISR to find pending causes */
	MGR_INT_CLR = 0x218,	/* write-1-to-clear interrupt bits */
	MGR_TX_MSG = 0x230,	/* TX message window; word i written at +4*i */
	MGR_RX_MSG = 0x270,	/* RX message window; word i read at +4*i */
	MGR_VE_STAT = 0x300,
};

/* MGR_CFG bit values */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,	/* written to exit clock pause (see wakeup path) */
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};

/*
 * Manager PGD registers.
 * Registers suffixed _EEn are banked per execution environment; the code
 * indexes them with a 16-byte stride (base + ee * 16).  Registers suffixed
 * n are banked per port with a 32-byte stride (base + port * 32).
 */
enum pgd_reg {
	PGD_CFG = 0x1000,
	PGD_STAT = 0x1004,
	PGD_INT_EN = 0x1010,
	PGD_INT_STAT = 0x1014,
	PGD_INT_CLR = 0x1018,
	PGD_OWN_EEn = 0x1020,
	PGD_PORT_INT_EN_EEn = 0x1030,
	PGD_PORT_INT_ST_EEn = 0x1034,
	PGD_PORT_INT_CL_EEn = 0x1038,
	PGD_PORT_CFGn = 0x1080,
	PGD_PORT_STATn = 0x1084,	/* bits [11:4] hold the BAM pipe index */
	PGD_PORT_PARAMn = 0x1088,
	PGD_PORT_BLKn = 0x108C,
	PGD_PORT_TRANn = 0x1090,
	PGD_PORT_MCHANn = 0x1094,
	PGD_PORT_PSHPLLn = 0x1098,
	PGD_PORT_PC_CFGn = 0x1600,
	PGD_PORT_PC_VALn = 0x1604,
	PGD_PORT_PC_VFR_TSn = 0x1608,
	PGD_PORT_PC_VFR_STn = 0x160C,
	PGD_PORT_PC_VFR_CLn = 0x1610,
	PGD_IE_STAT = 0x1700,
	PGD_VE_STAT = 0x1710,
};

/* Resource-group / EE ownership encodings -- TODO confirm target register */
enum rsc_grp {
	EE_MGR_RSC_GRP = 1 << 10,
	EE_NGD_2 = 2 << 6,
	EE_NGD_1 = 0,
};

/* MGR interrupt status/enable/clear bit positions */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	/*
	 * NOTE(review): 1 << 31 overflows signed int; 1u << 31 would be
	 * cleaner, but may change the enum's underlying type -- confirm
	 * before touching.
	 */
	MGR_INT_TX_MSG_SENT = 1 << 31,
};

/* Framer configuration field positions */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
};
186
/* SPS BAM instance backing the controller's data pipes */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM handle; used as pipe source/destination */
	void __iomem *base;	/* BAM register space */
	int irq;
};

/* One SPS endpoint (pipe) plus its connection bookkeeping */
struct msm_slim_endp {
	struct sps_pipe *sps;			/* from sps_alloc_endpoint() */
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;
	struct completion *xcomp;
	bool connected;				/* set after sps_connect() succeeds */
};

/* Driver state for one MSM SLIMbus manager instance */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* SLIMbus core controller (must be first) */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* manager register space */
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single shared TX message buffer */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];	/* RX ring storage */
	spinlock_t rx_lock;		/* protects head/tail of the RX ring */
	int head;			/* RX ring consumer index (thread) */
	int tail;			/* RX ring producer index (ISR) */
	int irq;
	int err;			/* last TX status; set to -EIO by ISR on NACK */
	int ee;				/* execution environment, indexes _EEn banks */
	struct completion *wr_comp;	/* completed by ISR when TX sent/NACKed */
	struct msm_slim_sat *satd;	/* satellite context */
	struct msm_slim_endp pipes[7];	/* data pipes; port = pipe index + pipe_b */
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;	/* RX message queue endpoint */
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;		/* root clock; re-enabled on clock-pause wakeup */
	struct mutex tx_lock;		/* serializes TX transactions */
	u8 pgdla;			/* logical addr of ported-generic device */
	bool use_rx_msgqs;
	int suspended;
	int pipe_b;			/* port number offset of pipes[0] */
	struct completion reconf;	/* completed by ISR on MGR_INT_RECFG_DONE */
	bool reconf_busy;
};

/* State for a satellite (remote execution environment) */
struct msm_slim_sat {
	struct slim_device satcl;
	struct msm_slim_ctrl *dev;
	struct workqueue_struct *wq;	/* services messages queued from the ISR */
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40];	/* satellite message ring storage */
	u16 *satch;			/* satellite-owned channel handles */
	u8 nsatch;
	bool sent_capability;		/* capability message sent exactly once */
	int shead;			/* ring consumer index (workqueue) */
	int stail;			/* ring producer index (ISR) */
	spinlock_t lock;		/* protects shead/stail */
};
247
248static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
249{
250 spin_lock(&dev->rx_lock);
251 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
252 spin_unlock(&dev->rx_lock);
253 dev_err(dev->dev, "RX QUEUE full!");
254 return -EXFULL;
255 }
256 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
257 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
258 spin_unlock(&dev->rx_lock);
259 return 0;
260}
261
262static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
263{
264 unsigned long flags;
265 spin_lock_irqsave(&dev->rx_lock, flags);
266 if (dev->tail == dev->head) {
267 spin_unlock_irqrestore(&dev->rx_lock, flags);
268 return -ENODATA;
269 }
270 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
271 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
272 spin_unlock_irqrestore(&dev->rx_lock, flags);
273 return 0;
274}
275
276static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
277{
278 struct msm_slim_ctrl *dev = sat->dev;
279 spin_lock(&sat->lock);
280 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
281 spin_unlock(&sat->lock);
282 dev_err(dev->dev, "SAT QUEUE full!");
283 return -EXFULL;
284 }
285 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
286 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
287 spin_unlock(&sat->lock);
288 return 0;
289}
290
291static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
292{
293 unsigned long flags;
294 spin_lock_irqsave(&sat->lock, flags);
295 if (sat->stail == sat->shead) {
296 spin_unlock_irqrestore(&sat->lock, flags);
297 return -ENODATA;
298 }
299 memcpy(buf, sat->sat_msgs[sat->shead], 40);
300 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
301 spin_unlock_irqrestore(&sat->lock, flags);
302 return 0;
303}
304
305static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
306{
307 e_addr[0] = (buffer[1] >> 24) & 0xff;
308 e_addr[1] = (buffer[1] >> 16) & 0xff;
309 e_addr[2] = (buffer[1] >> 8) & 0xff;
310 e_addr[3] = buffer[1] & 0xff;
311 e_addr[4] = (buffer[0] >> 24) & 0xff;
312 e_addr[5] = (buffer[0] >> 16) & 0xff;
313}
314
315static bool msm_is_sat_dev(u8 *e_addr)
316{
317 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
318 e_addr[2] != QC_CHIPID_SL &&
319 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
320 return true;
321 return false;
322}
323
/*
 * msm_slim_interrupt() - top-level ISR for the MSM SLIMbus manager.
 * @irq: interrupt number (unused)
 * @d:   struct msm_slim_ctrl registered with request_irq
 *
 * Handles, in order: TX done/NACK (wakes the waiting sender), RX message
 * reception (routes to satellite workqueue or RX ring), reconfiguration
 * done, and per-port status interrupts.  Every interrupt-clear register
 * write is followed by mb() so the clear is posted before any wakeup or
 * work queuing that could race with the next interrupt.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	/* TX path: message went out, or the bus NACKed it */
	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACKed: record -EIO for the waiting sender */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	/* RX path: read the message out of the RX window and route it */
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;	/* remaining length from header */
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;	/* message type */
		mc = (rx_buf[0] >> 8) & 0xff;	/* message code */
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User-class messages go to the satellite workqueue */
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				/* non-satellite: handled by the rxwq thread */
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* reply to a pending client transaction */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element; not queued */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Port interrupts: status bank for this EE (16-byte stride per EE) */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
							false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
					(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
489
490static int
491msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
492{
493 int ret;
494 struct sps_pipe *endpoint;
495 struct sps_connect *config = &ep->config;
496
497 /* Allocate the endpoint */
498 endpoint = sps_alloc_endpoint();
499 if (!endpoint) {
500 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
501 return -ENOMEM;
502 }
503
504 /* Get default connection configuration for an endpoint */
505 ret = sps_get_config(endpoint, config);
506 if (ret) {
507 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
508 goto sps_config_failed;
509 }
510
511 ep->sps = endpoint;
512 return 0;
513
514sps_config_failed:
515 sps_free_endpoint(endpoint);
516 return ret;
517}
518
519static void
520msm_slim_free_endpoint(struct msm_slim_endp *ep)
521{
522 sps_free_endpoint(ep->sps);
523 ep->sps = NULL;
524}
525
526static int msm_slim_sps_mem_alloc(
527 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
528{
529 dma_addr_t phys;
530
531 mem->size = len;
532 mem->min_size = 0;
533 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
534
535 if (!mem->base) {
536 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
537 return -ENOMEM;
538 }
539
540 mem->phys_base = phys;
541 memset(mem->base, 0x00, mem->size);
542 return 0;
543}
544
545static void
546msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
547{
548 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
549 mem->size = 0;
550 mem->base = NULL;
551 mem->phys_base = 0;
552}
553
/*
 * msm_hw_set_port() - program hardware defaults for port pn and enable
 * its interrupt in this EE's enable bank.
 * @dev: controller
 * @pn:  hardware port number (already offset by pipe_b at call sites)
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	/* watermark, alignment, packing and enable bits for PGD_PORT_CFGn */
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve other ports' interrupt-enable bits */
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}
567
/*
 * msm_slim_connect_pipe_port() - connect the SPS BAM pipe backing
 * controller port pn and enable the hardware port.
 * @dev: controller
 * @pn:  controller port index (pipes[] index; hardware port is pn + pipe_b)
 *
 * Reads the BAM pipe index assigned by hardware from the port status
 * register, configures the SPS connection in the direction given by the
 * port's flow (SLIM_SRC: memory -> BAM, else BAM -> memory), and on a
 * successful sps_connect() marks the pipe connected and programs the
 * hardware port. Returns 0 or a negative SPS error.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already connected: only refresh its configuration */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* hardware-assigned BAM pipe index is in status bits [11:4] */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* port sources data onto the bus: memory -> BAM */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* port sinks data from the bus: BAM -> memory */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
619
620static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
621{
622 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
623 /*
624 * Currently we block a transaction until the current one completes.
625 * In case we need multiple transactions, use message Q
626 */
627 return dev->tx_buf;
628}
629
630static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
631{
632 int i;
633 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
634 for (i = 0; i < (len + 3) >> 2; i++) {
635 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
636 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
637 }
638 /* Guarantee that message is sent before returning */
639 mb();
640 return 0;
641}
642
643static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
644{
645 DECLARE_COMPLETION_ONSTACK(done);
646 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
647 u32 *pbuf;
648 u8 *puc;
649 int timeout;
650 u8 la = txn->la;
651 mutex_lock(&dev->tx_lock);
652 if (txn->mt == SLIM_MSG_MT_CORE &&
653 txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
654 dev->reconf_busy) {
655 wait_for_completion(&dev->reconf);
656 dev->reconf_busy = false;
657 }
658 if (dev->suspended) {
659 dev_err(dev->dev, "No transaction in suspended state");
660 mutex_unlock(&dev->tx_lock);
661 return -EBUSY;
662 }
663 txn->rl--;
664 pbuf = msm_get_msg_buf(ctrl, txn->rl);
665 dev->wr_comp = NULL;
666 dev->err = 0;
667
668 if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
669 mutex_unlock(&dev->tx_lock);
670 return -EPROTONOSUPPORT;
671 }
672 if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
673 (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
674 txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
675 txn->mc == SLIM_MSG_MC_DISCONNECT_PORT))
676 la = dev->pgdla;
677 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
678 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
679 0, la);
680 else
681 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
682 1, la);
683 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
684 puc = ((u8 *)pbuf) + 3;
685 else
686 puc = ((u8 *)pbuf) + 2;
687 if (txn->rbuf)
688 *(puc++) = txn->tid;
689 if ((txn->mt == SLIM_MSG_MT_CORE) &&
690 ((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
691 txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
692 (txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
693 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
694 *(puc++) = (txn->ec & 0xFF);
695 *(puc++) = (txn->ec >> 8)&0xFF;
696 }
697 if (txn->wbuf)
698 memcpy(puc, txn->wbuf, txn->len);
699 if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
700 (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
701 txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
702 txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
703 if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
704 dev->err = msm_slim_connect_pipe_port(dev, *puc);
705 else {
706 struct msm_slim_endp *endpoint = &dev->pipes[*puc];
707 struct sps_register_event sps_event;
708 memset(&sps_event, 0, sizeof(sps_event));
709 sps_register_event(endpoint->sps, &sps_event);
710 sps_disconnect(endpoint->sps);
711 /*
712 * Remove channel disconnects master-side ports from
713 * channel. No need to send that again on the bus
714 */
715 dev->pipes[*puc].connected = false;
716 mutex_unlock(&dev->tx_lock);
717 return 0;
718 }
719 if (dev->err) {
720 dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
721 mutex_unlock(&dev->tx_lock);
722 return dev->err;
723 }
724 *(puc) = *(puc) + dev->pipe_b;
725 }
726 if (txn->mt == SLIM_MSG_MT_CORE &&
727 txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
728 dev->reconf_busy = true;
729 dev->wr_comp = &done;
730 msm_send_msg_buf(ctrl, pbuf, txn->rl);
731 timeout = wait_for_completion_timeout(&done, HZ);
732 if (!timeout)
733 dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
734 txn->mt);
735 mutex_unlock(&dev->tx_lock);
736 return timeout ? dev->err : -ETIMEDOUT;
737}
738
739static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
740 u8 elen, u8 laddr)
741{
742 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
743 DECLARE_COMPLETION_ONSTACK(done);
744 int timeout;
745 u32 *buf;
746 mutex_lock(&dev->tx_lock);
747 buf = msm_get_msg_buf(ctrl, 9);
748 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
749 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
750 SLIM_MSG_DEST_LOGICALADDR,
751 ea[5] | ea[4] << 8);
752 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
753 buf[2] = laddr;
754
755 dev->wr_comp = &done;
756 msm_send_msg_buf(ctrl, buf, 9);
757 timeout = wait_for_completion_timeout(&done, HZ);
758 mutex_unlock(&dev->tx_lock);
759 return timeout ? dev->err : -ETIMEDOUT;
760}
761
Sagar Dharia144e5e02011-08-08 17:30:11 -0600762static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
763{
764 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
765 clk_enable(dev->rclk);
766 writel_relaxed(1, dev->base + FRM_WAKEUP);
767 /* Make sure framer wakeup write goes through before exiting function */
768 mb();
769 /*
770 * Workaround: Currently, slave is reporting lost-sync messages
771 * after slimbus comes out of clock pause.
772 * Transaction with slave fail before slave reports that message
773 * Give some time for that report to come
774 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
775 * being 250 usecs, we wait for 20 superframes here to ensure
776 * we get the message
777 */
778 usleep_range(5000, 5000);
779 return 0;
780}
781
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700782static int msm_config_port(struct slim_controller *ctrl, u8 pn)
783{
784 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
785 struct msm_slim_endp *endpoint;
786 int ret = 0;
787 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
788 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
789 return -EPROTONOSUPPORT;
790 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
791 return -ENODEV;
792
793 endpoint = &dev->pipes[pn];
794 ret = msm_slim_init_endpoint(dev, endpoint);
795 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
796 return ret;
797}
798
799static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
800 u8 pn, u8 **done_buf, u32 *done_len)
801{
802 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
803 struct sps_iovec sio;
804 int ret;
805 if (done_len)
806 *done_len = 0;
807 if (done_buf)
808 *done_buf = NULL;
809 if (!dev->pipes[pn].connected)
810 return SLIM_P_DISCONNECT;
811 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
812 if (!ret) {
813 if (done_len)
814 *done_len = sio.size;
815 if (done_buf)
816 *done_buf = (u8 *)sio.addr;
817 }
818 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
819 return SLIM_P_INPROGRESS;
820}
821
822static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
823 u32 len, struct completion *comp)
824{
825 struct sps_register_event sreg;
826 int ret;
827 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600828 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 return -ENODEV;
830
831
832 ctrl->ports[pn].xcomp = comp;
833 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
834 sreg.mode = SPS_TRIGGER_WAIT;
835 sreg.xfer_done = comp;
836 sreg.callback = NULL;
837 sreg.user = &ctrl->ports[pn];
838 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
839 if (ret) {
840 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
841 return ret;
842 }
843 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
844 SPS_IOVEC_FLAG_INT);
845 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
846
847 return ret;
848}
849
/*
 * msm_sat_define_ch() - service a channel define/activate/control request
 * from a satellite.
 * @sat: satellite context
 * @buf: raw user-message payload
 * @len: payload length in bytes
 * @mc:  user message code (SLIM_USR_MC_DEFINE_CHAN, _DEF_ACT_CHAN or
 *       _CHAN_CTRL)
 *
 * For CHAN_CTRL the operation encoded in buf[3] is applied to the channel
 * indexed by buf[5]. Otherwise the channel properties are unpacked from
 * the payload, the channel (or group, when more than one handle is
 * listed) is defined, and for DEF_ACT_CHAN it is activated. Returns the
 * slimbus core's result code.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* buf[5] indexes the satellite-owned channel table */
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* channel table indices start at payload offset 8 */
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 combined with shift >> 5 yields
		 * 0/2/4/6 rather than 0..3 -- confirm the intended shift
		 * for the aux-format field.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		/* rate multiplier = (coeff ? 3 : 1) * 2^exp */
		prop.ratem = cc * (1 << exp);
		/* i > 9 after the copy loop means multiple handles: a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
						true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
						&sat->satch[buf[8]], 1, false,
						NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
						sat->satch[buf[8]],
						SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
899
/*
 * msm_slim_rxwq() - drain one message from the controller RX ring
 * (thread context, fed by the ISR).
 *
 * Dispatch: REPORT_PRESENT -> assign a logical address (and remember the
 * ported generic device's address in dev->pgdla); REPLY_INFORMATION /
 * REPLY_VALUE -> hand the payload to the slimbus core; REPORT_INFORMATION
 * -> log it; anything else -> dump as an error.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives MSB-first in bytes 2..7 */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* complete the client transaction identified by TID */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
949
/*
 * slim_sat_rxprocess() - workqueue handler that drains the satellite
 * message ring (fed by the ISR) and services each user message.
 *
 * Handles satellite enumeration (REPORT_PRESENT -> register the device
 * and send the manager-capability message once), address queries, channel
 * define/activate/control, reconfiguration, bandwidth requests and port
 * connect/disconnect. Messages flagged gen_ack get a GENERIC_ACK reply
 * carrying the original TID and a success/failure byte.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		/* defaults for replies; cases below override as needed */
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address is MSB-first in bytes 2..7 */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			/*
			 * NOTE(review): 21 handles are allocated below but
			 * nsatch is set to 20 -- confirm which is intended.
			 */
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* look up the LA for the queried enumeration addr */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* TID position depends on the message layout */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* forward as a core CONNECT message to the target LA */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/*
			 * NOTE(review): no break here -- falls through to
			 * default, which only breaks, so behavior is
			 * unaffected; an explicit break would be cleaner.
			 */
		default:
			break;
		}
		if (!gen_ack)
			continue;
		/* acknowledge the request: TID + success/failure byte */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
	}
}
1103
1104static void
1105msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1106{
1107 u32 *buf = ev->data.transfer.user;
1108 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1109
1110 /*
1111 * Note the virtual address needs to be offset by the same index
1112 * as the physical address or just pass in the actual virtual address
1113 * if the sps_mem_buffer is not needed. Note that if completion is
1114 * used, the virtual address won't be available and will need to be
1115 * calculated based on the offset of the physical address
1116 */
1117 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1118
1119 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1120
1121 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1122 iovec->addr, iovec->size, iovec->flags);
1123
1124 } else {
1125 dev_err(dev->dev, "%s: unknown event %d\n",
1126 __func__, ev->event_id);
1127 }
1128}
1129
1130static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1131{
1132 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1133 msm_slim_rx_msgq_event(dev, notify);
1134}
1135
1136/* Queue up Rx message buffer */
1137static inline int
1138msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1139{
1140 int ret;
1141 u32 flags = SPS_IOVEC_FLAG_INT;
1142 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1143 struct sps_mem_buffer *mem = &endpoint->buf;
1144 struct sps_pipe *pipe = endpoint->sps;
1145
1146 /* Rx message queue buffers are 4 bytes in length */
1147 u8 *virt_addr = mem->base + (4 * ix);
1148 u32 phys_addr = mem->phys_base + (4 * ix);
1149
1150 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1151
1152 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1153 if (ret)
1154 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1155
1156 return ret;
1157}
1158
/*
 * Pull one completed 4-byte message word out of the Rx message queue.
 *
 * Pops the next completed descriptor from the SPS pipe, copies the
 * corresponding 4-byte DMA buffer word into data[offset], then
 * immediately re-posts that buffer so the pipe never runs dry.
 *
 * Returns 0 on success, or the sps_get_iovec() error code.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	/* Fetch the descriptor of the completed transfer */
	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	/* The completed descriptor must point inside our DMA buffer */
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index (Rx buffers are 4 bytes each) */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
1192
/*
 * Rx message queue thread: reassembles and routes inbound messages.
 *
 * Woken via dev->rx_msgq_notify once per received 4-byte message word.
 * Words accumulate into 'buffer' until the full message (length taken
 * from the 5 LSBs of the first word) has arrived; the message is then
 * routed either to the satellite workqueue (user-referred message types,
 * or present-reports from satellite devices) or to the generic Rx path.
 *
 * NOTE(review): the first-word branch never checks for completion, so
 * this assumes every message is longer than 4 bytes — confirm against
 * the slimbus message format.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;		/* message code of message being assembled */
	u32 mt = 0;		/* message type of message being assembled */
	u32 buffer[10];		/* reassembly buffer, one u32 per Rx word */
	int index = 0;		/* next word slot within 'buffer' */
	u8 msg_len = 0;		/* total length in bytes of current message */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			/* Message queues unused: drain the software queue */
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: parse length, type and code fields */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			/* Message complete: reset state and dispatch */
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1261
1262static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1263{
1264 int i, ret;
1265 u32 pipe_offset;
1266 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1267 struct sps_connect *config = &endpoint->config;
1268 struct sps_mem_buffer *descr = &config->desc;
1269 struct sps_mem_buffer *mem = &endpoint->buf;
1270 struct completion *notify = &dev->rx_msgq_notify;
1271
1272 struct sps_register_event sps_error_event; /* SPS_ERROR */
1273 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1274
1275 /* Allocate the endpoint */
1276 ret = msm_slim_init_endpoint(dev, endpoint);
1277 if (ret) {
1278 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1279 goto sps_init_endpoint_failed;
1280 }
1281
1282 /* Get the pipe indices for the message queues */
1283 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1284 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1285
1286 config->mode = SPS_MODE_SRC;
1287 config->source = dev->bam.hdl;
1288 config->destination = SPS_DEV_HANDLE_MEM;
1289 config->src_pipe_index = pipe_offset;
1290 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1291 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1292
1293 /* Allocate memory for the FIFO descriptors */
1294 ret = msm_slim_sps_mem_alloc(dev, descr,
1295 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1296 if (ret) {
1297 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1298 goto alloc_descr_failed;
1299 }
1300
1301 ret = sps_connect(endpoint->sps, config);
1302 if (ret) {
1303 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1304 goto sps_connect_failed;
1305 }
1306
1307 /* Register completion for DESC_DONE */
1308 init_completion(notify);
1309 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1310
1311 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1312 sps_descr_event.options = SPS_O_DESC_DONE;
1313 sps_descr_event.user = (void *)dev;
1314 sps_descr_event.xfer_done = notify;
1315
1316 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1317 if (ret) {
1318 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1319 goto sps_reg_event_failed;
1320 }
1321
1322 /* Register callback for errors */
1323 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1324 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1325 sps_error_event.options = SPS_O_ERROR;
1326 sps_error_event.user = (void *)dev;
1327 sps_error_event.callback = msm_slim_rx_msgq_cb;
1328
1329 ret = sps_register_event(endpoint->sps, &sps_error_event);
1330 if (ret) {
1331 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1332 goto sps_reg_event_failed;
1333 }
1334
1335 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1336 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1337 if (ret) {
1338 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1339 goto alloc_buffer_failed;
1340 }
1341
1342 /*
1343 * Call transfer_one for each 4-byte buffer
1344 * Use (buf->size/4) - 1 for the number of buffer to post
1345 */
1346
1347 /* Setup the transfer */
1348 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1349 ret = msm_slim_post_rx_msgq(dev, i);
1350 if (ret) {
1351 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1352 goto sps_transfer_failed;
1353 }
1354 }
1355
1356 /* Fire up the Rx message queue thread */
1357 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1358 MSM_SLIM_NAME "_rx_msgq_thread");
1359 if (!dev->rx_msgq_thread) {
1360 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
1361 ret = -EIO;
1362 } else
1363 return 0;
1364
1365sps_transfer_failed:
1366 msm_slim_sps_mem_free(dev, mem);
1367alloc_buffer_failed:
1368 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1369 sps_register_event(endpoint->sps, &sps_error_event);
1370sps_reg_event_failed:
1371 sps_disconnect(endpoint->sps);
1372sps_connect_failed:
1373 msm_slim_sps_mem_free(dev, descr);
1374alloc_descr_failed:
1375 msm_slim_free_endpoint(endpoint);
1376sps_init_endpoint_failed:
1377 return ret;
1378}
1379
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/*
	 * Static security configuration: which BAM pipes each execution
	 * environment (EE) owns. NOTE(review): these pipe masks appear
	 * target-specific — verify against the SoC's BAM pipe assignment
	 * before reusing on another board.
	 */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	/*
	 * NOTE(review): if this EE owns no pipe above bit 7 the loop exits
	 * with i == 32 and pipe_b becomes 25 — confirm every supported EE
	 * owns at least one data pipe.
	 */
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}
1447
1448static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1449{
1450 if (dev->use_rx_msgqs) {
1451 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1452 struct sps_connect *config = &endpoint->config;
1453 struct sps_mem_buffer *descr = &config->desc;
1454 struct sps_mem_buffer *mem = &endpoint->buf;
1455 struct sps_register_event sps_event;
1456 memset(&sps_event, 0x00, sizeof(sps_event));
1457 msm_slim_sps_mem_free(dev, mem);
1458 sps_register_event(endpoint->sps, &sps_event);
1459 sps_disconnect(endpoint->sps);
1460 msm_slim_sps_mem_free(dev, descr);
1461 msm_slim_free_endpoint(endpoint);
1462 }
1463 sps_deregister_bam_device(dev->bam.hdl);
1464}
1465
1466static int __devinit msm_slim_probe(struct platform_device *pdev)
1467{
1468 struct msm_slim_ctrl *dev;
1469 int ret;
1470 struct resource *bam_mem, *bam_io;
1471 struct resource *slim_mem, *slim_io;
1472 struct resource *irq, *bam_irq;
1473 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1474 "slimbus_physical");
1475 if (!slim_mem) {
1476 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1477 return -ENODEV;
1478 }
1479 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1480 pdev->name);
1481 if (!slim_io) {
1482 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1483 return -EBUSY;
1484 }
1485
1486 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1487 "slimbus_bam_physical");
1488 if (!bam_mem) {
1489 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1490 ret = -ENODEV;
1491 goto err_get_res_bam_failed;
1492 }
1493 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1494 pdev->name);
1495 if (!bam_io) {
1496 release_mem_region(slim_mem->start, resource_size(slim_mem));
1497 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1498 ret = -EBUSY;
1499 goto err_get_res_bam_failed;
1500 }
1501 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1502 "slimbus_irq");
1503 if (!irq) {
1504 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1505 ret = -ENODEV;
1506 goto err_get_res_failed;
1507 }
1508 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1509 "slimbus_bam_irq");
1510 if (!bam_irq) {
1511 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1512 ret = -ENODEV;
1513 goto err_get_res_failed;
1514 }
1515
1516 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1517 if (!dev) {
1518 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1519 ret = -ENOMEM;
1520 goto err_get_res_failed;
1521 }
1522 dev->dev = &pdev->dev;
1523 platform_set_drvdata(pdev, dev);
1524 slim_set_ctrldata(&dev->ctrl, dev);
1525 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1526 if (!dev->base) {
1527 dev_err(&pdev->dev, "IOremap failed\n");
1528 ret = -ENOMEM;
1529 goto err_ioremap_failed;
1530 }
1531 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1532 if (!dev->bam.base) {
1533 dev_err(&pdev->dev, "BAM IOremap failed\n");
1534 ret = -ENOMEM;
1535 goto err_ioremap_bam_failed;
1536 }
1537 dev->ctrl.nr = pdev->id;
1538 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1539 dev->ctrl.nports = MSM_SLIM_NPORTS;
1540 dev->ctrl.set_laddr = msm_set_laddr;
1541 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001542 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543 dev->ctrl.config_port = msm_config_port;
1544 dev->ctrl.port_xfer = msm_slim_port_xfer;
1545 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1546 /* Reserve some messaging BW for satellite-apps driver communication */
1547 dev->ctrl.sched.pending_msgsl = 30;
1548
1549 init_completion(&dev->reconf);
1550 mutex_init(&dev->tx_lock);
1551 spin_lock_init(&dev->rx_lock);
1552 dev->ee = 1;
1553 dev->use_rx_msgqs = 1;
1554 dev->irq = irq->start;
1555 dev->bam.irq = bam_irq->start;
1556
1557 ret = msm_slim_sps_init(dev, bam_mem);
1558 if (ret != 0) {
1559 dev_err(dev->dev, "error SPS init\n");
1560 goto err_sps_init_failed;
1561 }
1562
1563
1564 dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
1565 if (dev->rclk) {
1566 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
1567 clk_enable(dev->rclk);
1568 } else {
1569 dev_err(dev->dev, "slimbus clock not found");
1570 goto err_clk_get_failed;
1571 }
1572 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1573 dev->framer.superfreq =
1574 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1575 dev->ctrl.a_framer = &dev->framer;
1576 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
1577 ret = slim_add_numbered_controller(&dev->ctrl);
1578 if (ret) {
1579 dev_err(dev->dev, "error adding controller\n");
1580 goto err_ctrl_failed;
1581 }
1582
1583 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1584 "msm_slim_irq", dev);
1585 if (ret) {
1586 dev_err(&pdev->dev, "request IRQ failed\n");
1587 goto err_request_irq_failed;
1588 }
1589
1590 dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1591 if (!dev->satd) {
1592 ret = -ENOMEM;
1593 goto err_sat_failed;
1594 }
1595 dev->satd->dev = dev;
1596 dev->satd->satcl.name = "msm_sat_dev";
1597 spin_lock_init(&dev->satd->lock);
1598 INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
1599 dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
1600 /* Component register initialization */
1601 writel_relaxed(1, dev->base + COMP_CFG);
1602 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1603 dev->base + COMP_TRUST_CFG);
1604
1605 /*
1606 * Manager register initialization
1607 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1608 */
1609 if (dev->use_rx_msgqs)
1610 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1611 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1612 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1613 else
1614 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1615 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1616 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1617 writel_relaxed(1, dev->base + MGR_CFG);
1618 /*
1619 * Framer registers are beyond 1K memory region after Manager and/or
1620 * component registers. Make sure those writes are ordered
1621 * before framer register writes
1622 */
1623 wmb();
1624
1625 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001626 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1627 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1628 dev->base + FRM_CFG);
1629 /*
1630 * Make sure that framer wake-up and enabling writes go through
1631 * before any other component is enabled. Framer is responsible for
1632 * clocking the bus and enabling framer first will ensure that other
1633 * devices can report presence when they are enabled
1634 */
1635 mb();
1636
1637 /* Enable RX msg Q */
1638 if (dev->use_rx_msgqs)
1639 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1640 dev->base + MGR_CFG);
1641 else
1642 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1643 /*
1644 * Make sure that manager-enable is written through before interface
1645 * device is enabled
1646 */
1647 mb();
1648 writel_relaxed(1, dev->base + INTF_CFG);
1649 /*
1650 * Make sure that interface-enable is written through before enabling
1651 * ported generic device inside MSM manager
1652 */
1653 mb();
1654 writel_relaxed(1, dev->base + PGD_CFG);
1655 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1656 /*
1657 * Make sure that ported generic device is enabled and port-EE settings
1658 * are written through before finally enabling the component
1659 */
1660 mb();
1661
1662 writel_relaxed(1, dev->base + COMP_CFG);
1663 /*
1664 * Make sure that all writes have gone through before exiting this
1665 * function
1666 */
1667 mb();
1668 dev_dbg(dev->dev, "MSM SB controller is up!\n");
1669 return 0;
1670
1671err_sat_failed:
1672 free_irq(dev->irq, dev);
1673err_request_irq_failed:
1674 slim_del_controller(&dev->ctrl);
1675err_ctrl_failed:
1676 clk_disable(dev->rclk);
1677 clk_put(dev->rclk);
1678err_clk_get_failed:
1679 msm_slim_sps_exit(dev);
1680err_sps_init_failed:
1681 iounmap(dev->bam.base);
1682err_ioremap_bam_failed:
1683 iounmap(dev->base);
1684err_ioremap_failed:
1685 kfree(dev);
1686err_get_res_failed:
1687 release_mem_region(bam_mem->start, resource_size(bam_mem));
1688err_get_res_bam_failed:
1689 release_mem_region(slim_mem->start, resource_size(slim_mem));
1690 return ret;
1691}
1692
/*
 * Remove: tear down the satellite device, controller, clock, SPS/BAM
 * resources, mappings and memory regions in roughly the reverse order
 * of probe.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	struct msm_slim_sat *sat = dev->satd;
	slim_remove_device(&sat->satcl);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_disable(dev->rclk);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	/*
	 * NOTE(review): the Rx thread normally blocks in
	 * wait_for_completion_interruptible(&dev->rx_msgq_notify);
	 * kthread_stop() does not complete that completion, so this stop
	 * may stall until the next Rx wakeup — confirm the shutdown path.
	 */
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
1722
1723#ifdef CONFIG_PM
/*
 * System suspend: request slimbus clock pause and, on success, wait for
 * any in-flight reconfiguration, gate the root clock and disable the
 * controller IRQ.
 *
 * Returns 0 on success. -EBUSY from clk_pause (active channels, e.g. a
 * live audio stream) is deliberately masked to 0 so the rest of the
 * system can still suspend; other errors propagate.
 */
static int msm_slim_suspend(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	int ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
	/* Make sure clock pause goes through */
	mutex_lock(&dev->tx_lock);
	if (!ret && dev->reconf_busy) {
		/* Pause succeeded: drain the pending reconfiguration first */
		wait_for_completion(&dev->reconf);
		dev->reconf_busy = false;
	}
	mutex_unlock(&dev->tx_lock);
	if (!ret) {
		clk_disable(dev->rclk);
		disable_irq(dev->irq);
		dev->suspended = 1;
	} else if (ret == -EBUSY) {
		/*
		 * If the clock pause failed due to active channels, there is
		 * a possibility that some audio stream is active during suspend
		 * We dont want to return suspend failure in that case so that
		 * display and relevant components can still go to suspend.
		 * If there is some other error, then it should be passed-on
		 * to system level suspend
		 */
		ret = 0;
	}
	return ret;
}
1753
1754static int msm_slim_resume(struct device *device)
1755{
1756 struct platform_device *pdev = to_platform_device(device);
1757 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia144e5e02011-08-08 17:30:11 -06001758 mutex_lock(&dev->tx_lock);
1759 if (dev->suspended) {
1760 dev->suspended = 0;
1761 mutex_unlock(&dev->tx_lock);
1762 enable_irq(dev->irq);
1763 return slim_ctrl_clk_pause(&dev->ctrl, true, 0);
1764 }
1765 mutex_unlock(&dev->tx_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766 return 0;
1767}
1768#else
1769#define msm_slim_suspend NULL
1770#define msm_slim_resume NULL
1771#endif /* CONFIG_PM */
1772
1773#ifdef CONFIG_PM_RUNTIME
/* pm_runtime idle hook: no hardware action yet, just trace the call. */
static int msm_slim_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idle...\n");
	return 0;
}
1779
/* pm_runtime suspend hook: no hardware action yet, just trace the call. */
static int msm_slim_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1785
/* pm_runtime resume hook: no hardware action yet, just trace the call. */
static int msm_slim_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1791#else
1792#define msm_slim_runtime_idle NULL
1793#define msm_slim_runtime_suspend NULL
1794#define msm_slim_runtime_resume NULL
1795#endif
1796
/* System-sleep and runtime-PM callbacks; stubs resolve to NULL when the
 * corresponding CONFIG_PM/CONFIG_PM_RUNTIME option is disabled.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
1808
/* Platform driver binding by name (MSM_SLIM_NAME) with PM ops attached */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
1818
/*
 * Registered at subsys_initcall time (rather than module_init) so the
 * slimbus controller is available before client drivers probe.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
1824
/* Module unload: unregister the platform driver. */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
1830
1831MODULE_LICENSE("GPL v2");
1832MODULE_VERSION("0.1");
1833MODULE_DESCRIPTION("MSM Slimbus controller");
1834MODULE_ALIAS("platform:msm-slim");