/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slimbus/slimbus.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <mach/sps.h>

/* Per spec, max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM	32

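/*
 * Assemble the first word of a SLIMbus message: 5-bit remaining
 * length (l), 3-bit message type (mt), 8-bit message code (mc),
 * 1-bit destination type (dt), and the destination address (ad)
 * in the upper bits.
 */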
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
#define SLIM_ROOT_FREQ	24576000

#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCESS	0x20

#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
#define QC_MSM_DEVS	5

/* Component registers */
enum comp_reg {
	COMP_CFG	= 0,
	COMP_TRUST_CFG	= 0x14,
};

/* Manager registers */
enum mgr_reg {
	MGR_CFG		= 0x200,
	MGR_STATUS	= 0x204,
	MGR_RX_MSGQ_CFG	= 0x208,
	MGR_INT_EN	= 0x210,
	MGR_INT_STAT	= 0x214,
	MGR_INT_CLR	= 0x218,
	MGR_TX_MSG	= 0x230,
	MGR_RX_MSG	= 0x270,
	MGR_VE_STAT	= 0x300,
};

enum msg_cfg {
	MGR_CFG_ENABLE		= 1,
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,
	MSGQ_TX_LOW	= 1,
	MSGQ_TX_HIGH	= 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG		= 0x400,
	FRM_STAT	= 0x404,
	FRM_INT_EN	= 0x410,
	FRM_INT_STAT	= 0x414,
	FRM_INT_CLR	= 0x418,
	FRM_WAKEUP	= 0x41C,
	FRM_CLKCTL_DONE	= 0x420,
	FRM_IE_STAT	= 0x430,
	FRM_VE_STAT	= 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG	= 0x600,
	INTF_STAT	= 0x604,
	INTF_INT_EN	= 0x610,
	INTF_INT_STAT	= 0x614,
	INTF_INT_CLR	= 0x618,
	INTF_IE_STAT	= 0x630,
	INTF_VE_STAT	= 0x640,
};

/* Manager PGD registers */
enum pgd_reg {
	PGD_CFG			= 0x1000,
	PGD_STAT		= 0x1004,
	PGD_INT_EN		= 0x1010,
	PGD_INT_STAT		= 0x1014,
	PGD_INT_CLR		= 0x1018,
	PGD_OWN_EEn		= 0x1020,
	PGD_PORT_INT_EN_EEn	= 0x1030,
	PGD_PORT_INT_ST_EEn	= 0x1034,
	PGD_PORT_INT_CL_EEn	= 0x1038,
	PGD_PORT_CFGn		= 0x1080,
	PGD_PORT_STATn		= 0x1084,
	PGD_PORT_PARAMn		= 0x1088,
	PGD_PORT_BLKn		= 0x108C,
	PGD_PORT_TRANn		= 0x1090,
	PGD_PORT_MCHANn		= 0x1094,
	PGD_PORT_PSHPLLn	= 0x1098,
	PGD_PORT_PC_CFGn	= 0x1600,
	PGD_PORT_PC_VALn	= 0x1604,
	PGD_PORT_PC_VFR_TSn	= 0x1608,
	PGD_PORT_PC_VFR_STn	= 0x160C,
	PGD_PORT_PC_VFR_CLn	= 0x1610,
	PGD_IE_STAT		= 0x1700,
	PGD_VE_STAT		= 0x1710,
};

enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};

enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};

enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};

enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};

struct msm_slim_sps_bam {
	u32			hdl;
	void __iomem		*base;
	int			irq;
};

struct msm_slim_endp {
	struct sps_pipe			*sps;
	struct sps_connect		config;
	struct sps_register_event	event;
	struct sps_mem_buffer		buf;
	struct completion		*xcomp;
	bool				connected;
};

struct msm_slim_ctrl {
	struct slim_controller	ctrl;
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;
	struct resource		*slew_mem;
	u32			curr_bw;
	u8			msg_cnt;
	u32			tx_buf[10];
	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t		rx_lock;
	int			head;
	int			tail;
	int			irq;
	int			err;
	int			ee;
	struct completion	*wr_comp;
	struct msm_slim_sat	*satd;
	struct msm_slim_endp	pipes[7];
	struct msm_slim_sps_bam	bam;
	struct msm_slim_endp	rx_msgq;
	struct completion	rx_msgq_notify;
	struct task_struct	*rx_msgq_thread;
	struct clk		*rclk;
	struct mutex		tx_lock;
	u8			pgdla;
	bool			use_rx_msgqs;
	int			pipe_b;
	struct completion	reconf;
	bool			reconf_busy;
	bool			chan_active;
	enum msm_ctrl_state	state;
};

struct msm_slim_sat {
	struct slim_device	satcl;
	struct msm_slim_ctrl	*dev;
	struct workqueue_struct	*wq;
	struct work_struct	wd;
	u8			sat_msgs[SAT_CONCUR_MSG][40];
	u16			*satch;
	u8			nsatch;
	bool			sent_capability;
	bool			pending_reconf;
	bool			pending_capability;
	int			shead;
	int			stail;
	spinlock_t		lock;
};

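/*
 * Simple ring buffer of received messages: head/tail index into
 * MSM_CONCUR_MSG fixed-size slots under rx_lock; enqueue fails with
 * -EXFULL when the ring is full. The satellite queue below mirrors
 * this scheme with its own lock and indices.
 */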
static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
	spin_lock(&dev->rx_lock);
	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
		spin_unlock(&dev->rx_lock);
		dev_err(dev->dev, "RX QUEUE full!");
		return -EXFULL;
	}
	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
	spin_unlock(&dev->rx_lock);
	return 0;
}

static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (dev->tail == dev->head) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		return -ENODATA;
	}
	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
	spin_unlock_irqrestore(&dev->rx_lock, flags);
	return 0;
}

static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
{
	struct msm_slim_ctrl *dev = sat->dev;
	spin_lock(&sat->lock);
	if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
		spin_unlock(&sat->lock);
		dev_err(dev->dev, "SAT QUEUE full!");
		return -EXFULL;
	}
	memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
	sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
	spin_unlock(&sat->lock);
	return 0;
}

static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
{
	unsigned long flags;
	spin_lock_irqsave(&sat->lock, flags);
	if (sat->stail == sat->shead) {
		spin_unlock_irqrestore(&sat->lock, flags);
		return -ENODATA;
	}
	memcpy(buf, sat->sat_msgs[sat->shead], 40);
	sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
	spin_unlock_irqrestore(&sat->lock, flags);
	return 0;
}

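/*
 * Extract the 6-byte enumeration address from a received
 * report-present message: buffer[1] carries bytes 0-3 (MSB first)
 * and bits 31:16 of buffer[0] carry bytes 4-5.
 */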
static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
{
	e_addr[0] = (buffer[1] >> 24) & 0xff;
	e_addr[1] = (buffer[1] >> 16) & 0xff;
	e_addr[2] = (buffer[1] >> 8) & 0xff;
	e_addr[3] = buffer[1] & 0xff;
	e_addr[4] = (buffer[0] >> 24) & 0xff;
	e_addr[5] = (buffer[0] >> 16) & 0xff;
}

static bool msm_is_sat_dev(u8 *e_addr)
{
	if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
		e_addr[2] != QC_CHIPID_SL &&
		(e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
		return true;
	return false;
}

static void msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
	pm_runtime_get_sync(dev->dev);
}

static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put(dev->dev);
}

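/*
 * Top-half interrupt handler: acknowledges TX completion (or NACK),
 * routes received messages to the satellite workqueue or the generic
 * RX queue, signals reconfiguration-done, and latches port
 * overflow/underflow/disconnect status.
 */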
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			struct msm_slim_sat *sat = dev->satd;
			msm_sat_enqueue(sat, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			if (msm_is_sat_dev(e_addr)) {
				/*
				 * Consider possibility that this device may
				 * be reporting more than once?
				 */
				struct msm_slim_sat *sat = dev->satd;
				msm_sat_enqueue(sat, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before queuing work
				 */
				mb();
				queue_work(sat->wq, &sat->wd);
			} else {
				msm_slim_rx_enqueue(dev, rx_buf, len);
				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
							MGR_INT_CLR);
				/*
				 * Guarantee that CLR bit write goes through
				 * before signalling completion
				 */
				mb();
				complete(&dev->rx_msgq_notify);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC:%x, MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		if (dev->ctrl.sched.usedslots == 0 && dev->chan_active) {
			dev->chan_active = false;
			msm_slim_put_ctrl(dev);
		}
		complete(&dev->reconf);
	}
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
							false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
					(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}

static int
msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	int ret;
	struct sps_pipe *endpoint;
	struct sps_connect *config = &ep->config;

	/* Allocate the endpoint */
	endpoint = sps_alloc_endpoint();
	if (!endpoint) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	ret = sps_get_config(endpoint, config);
	if (ret) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
		goto sps_config_failed;
	}

	ep->sps = endpoint;
	return 0;

sps_config_failed:
	sps_free_endpoint(endpoint);
	return ret;
}

static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}

static int msm_slim_sps_mem_alloc(
		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
	dma_addr_t phys;

	mem->size = len;
	mem->min_size = 0;
	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);

	if (!mem->base) {
		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
		return -ENOMEM;
	}

	mem->phys_base = phys;
	memset(mem->base, 0x00, mem->size);
	return 0;
}

static void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
	mem->size = 0;
	mem->base = NULL;
	mem->phys_base = 0;
}

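/*
 * Program a PGD port with the default watermark, alignment and packing
 * settings, and enable its interrupt for this execution environment.
 */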
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn), dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}

static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error:%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config error:%x\n",
					ret);
			return ret;
		}
	}

	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}

static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}

static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
{
	int i;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	for (i = 0; i < (len + 3) >> 2; i++) {
		dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
		writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
	}
	/* Guarantee that message is sent before returning */
	mb();
	return 0;
}

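/*
 * Assemble and send one SLIMbus message: vote for runtime PM
 * (messaging and data channels vote separately), build the header and
 * payload in the TX buffer, handle the connect/disconnect-port special
 * cases, then block until the TX-done interrupt or a 1-second timeout.
 */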
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			dev->chan_active = true;
			msm_slim_get_ctrl(dev);
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		  mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		 (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		  mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			msm_slim_put_ctrl(dev);
			return dev->err;
		}
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
				SLIM_MSG_CLK_PAUSE_SEQ_FLG)) && timeout) {
		timeout = wait_for_completion_timeout(&dev->reconf, HZ);
		dev->reconf_busy = false;
		if (timeout) {
			clk_disable(dev->rclk);
			disable_irq(dev->irq);
		}
	}
	if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
				SLIM_MSG_CLK_PAUSE_SEQ_FLG)) && !timeout) {
		dev->reconf_busy = false;
		dev_err(dev->dev, "clock pause failed");
		mutex_unlock(&dev->tx_lock);
		return -ETIMEDOUT;
	}

	mutex_unlock(&dev->tx_lock);
	if (!txn->rbuf && !(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}

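/*
 * Assign a logical address to the device with the given 6-byte
 * enumeration address and wait for the TX completion.
 */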
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}

static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}

static int msm_config_port(struct slim_controller *ctrl, u8 pn)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	struct msm_slim_endp *endpoint;
	int ret = 0;
	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
		return -EPROTONOSUPPORT;
	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
		return -ENODEV;

	endpoint = &dev->pipes[pn];
	ret = msm_slim_init_endpoint(dev, endpoint);
	dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
	return ret;
}

static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
				u8 pn, u8 **done_buf, u32 *done_len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
	struct sps_iovec sio;
	int ret;
	if (done_len)
		*done_len = 0;
	if (done_buf)
		*done_buf = NULL;
	if (!dev->pipes[pn].connected)
		return SLIM_P_DISCONNECT;
	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
	if (!ret) {
		if (done_len)
			*done_len = sio.size;
		if (done_buf)
			*done_buf = (u8 *)sio.addr;
	}
	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
	return SLIM_P_INPROGRESS;
}

static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;

	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}

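/*
 * Parse a satellite channel request. For CHAN_CTRL, apply the
 * requested operation to the referenced channel handle. Otherwise
 * decode the channel properties from the payload; the rate multiplier
 * is cc * 2^exp, where exp comes from buf[5] bits 7:4 and cc is 3 or 1
 * depending on the coefficient bit (buf[4] bit 5).
 */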
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		u16 chanh = sat->satch[buf[5]];
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		for (i = 8; i < len; i++)
			chh[i-8] = sat->satch[buf[i]];
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F) * SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &sat->satch[buf[8]]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&sat->satch[buf[8]], 1, false,
					NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					sat->satch[buf[8]],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}

static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1)) {
				pm_runtime_enable(dev->dev);
				/*
				 * Avoid runtime-PM by default, but allow
				 * command line activation
				 */
				pm_runtime_forbid(dev->dev);
			}

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			msm_slim_put_ctrl(dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}

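/*
 * Satellite workqueue handler: drains the satellite message ring and
 * services each request (capability exchange, address query, channel
 * define/control, bandwidth request, port connect/disconnect).
 * Requests carrying a transaction ID are answered with a GENERIC_ACK
 * whose second byte reports success or failure.
 */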
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		int i;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				msm_slim_get_ctrl(dev);
				sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT)
			msm_slim_get_ctrl(dev);
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability)
				continue;
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite owns first 21 channels */
			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
			sat->nsatch = 20;
			/* alloc all sat chans */
			for (i = 0; i < 21; i++)
				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			if (!sat->pending_reconf) {
				msm_slim_get_ctrl(dev);
				sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		default:
			break;
		}
		if (!gen_ack) {
			if (mc != SLIM_MSG_MC_REPORT_PRESENT)
				msm_slim_put_ctrl(dev);
			continue;
		}

		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCESS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		msm_slim_put_ctrl(dev);
	}
}

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
	u32 *buf = ev->data.transfer.user;
	struct sps_iovec *iovec = &ev->data.transfer.iovec;

	/*
	 * Note the virtual address needs to be offset by the same index
	 * as the physical address or just pass in the actual virtual address
	 * if the sps_mem_buffer is not needed. Note that if completion is
	 * used, the virtual address won't be available and will need to be
	 * calculated based on the offset of the physical address
	 */
	if (ev->event_id == SPS_EVENT_DESC_DONE) {

		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);

		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
			iovec->addr, iovec->size, iovec->flags);

	} else {
		dev_err(dev->dev, "%s: unknown event %d\n",
					__func__, ev->event_id);
	}
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
	msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up Rx message buffer */
static inline int
msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
	int ret;
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;

	/* Rx message queue buffers are 4 bytes in length */
	u8 *virt_addr = mem->base + (4 * ix);
	u32 phys_addr = mem->phys_base + (4 * ix);

	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);

	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
	if (ret)
		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);

	return ret;
}

static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}

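/*
 * RX message-queue thread: when BAM message queues are disabled, each
 * wakeup simply drains the RX ring. Otherwise a message is reassembled
 * from 4-byte descriptors; the first word carries length, type and
 * code, which decide whether the completed message goes to the
 * satellite workqueue or the generic RX path.
 */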
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
				sat = dev->satd;

		} else if ((index * 4) >= msg_len) {
			index = 0;
			if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
				u8 e_addr[6];
				msm_get_eaddr(e_addr, buffer);
				if (msm_is_sat_dev(e_addr))
					sat = dev->satd;
			}
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}

static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
{
	int i, ret;
	u32 pipe_offset;
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_connect *config = &endpoint->config;
	struct sps_mem_buffer *descr = &config->desc;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct completion *notify = &dev->rx_msgq_notify;

	struct sps_register_event sps_error_event; /* SPS_ERROR */
	struct sps_register_event sps_descr_event; /* DESCR_DONE */

	/* Allocate the endpoint */
	ret = msm_slim_init_endpoint(dev, endpoint);
	if (ret) {
		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
		goto sps_init_endpoint_failed;
	}

	/* Get the pipe indices for the message queues */
	pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

	config->mode = SPS_MODE_SRC;
	config->source = dev->bam.hdl;
	config->destination = SPS_DEV_HANDLE_MEM;
	config->src_pipe_index = pipe_offset;
	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Allocate memory for the FIFO descriptors */
	ret = msm_slim_sps_mem_alloc(dev, descr,
				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
	if (ret) {
		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
		goto alloc_descr_failed;
	}

	ret = sps_connect(endpoint->sps, config);
	if (ret) {
		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
		goto sps_connect_failed;
	}

	/* Register completion for DESC_DONE */
	init_completion(notify);
	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));

	sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
	sps_descr_event.options = SPS_O_DESC_DONE;
	sps_descr_event.user = (void *)dev;
	sps_descr_event.xfer_done = notify;

	ret = sps_register_event(endpoint->sps, &sps_descr_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Register callback for errors */
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
	sps_error_event.options = SPS_O_ERROR;
	sps_error_event.user = (void *)dev;
	sps_error_event.callback = msm_slim_rx_msgq_cb;

	ret = sps_register_event(endpoint->sps, &sps_error_event);
	if (ret) {
		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
		goto sps_reg_event_failed;
	}

	/* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
	if (ret) {
		dev_err(dev->dev, "dma_alloc_coherent failed\n");
		goto alloc_buffer_failed;
	}

	/*
	 * Call transfer_one for each 4-byte buffer
	 * Use (buf->size/4) - 1 for the number of buffer to post
	 */

	/* Setup the transfer */
	for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
		ret = msm_slim_post_rx_msgq(dev, i);
		if (ret) {
			dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
			goto sps_transfer_failed;
		}
	}

	/* Fire up the Rx message queue thread */
	dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
					MSM_SLIM_NAME "_rx_msgq_thread");
	if (IS_ERR(dev->rx_msgq_thread)) {
		/* kthread_run() returns an ERR_PTR(), not NULL, on failure */
		ret = PTR_ERR(dev->rx_msgq_thread);
		dev_err(dev->dev, "Failed to start Rx message queue thread\n");
	} else
		return 0;

sps_transfer_failed:
	msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
	sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
	sps_disconnect(endpoint->sps);
sps_connect_failed:
	msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
	msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
	return ret;
}

/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
		return ret;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

	ret = msm_slim_init_rx_msgq(dev);
	if (ret) {
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
		goto rx_msgq_init_failed;
	}

	return 0;
rx_msgq_init_failed:
	sps_deregister_bam_device(bam_handle);
	dev->bam.hdl = 0L;
	return ret;
}

static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
	}
	sps_deregister_bam_device(dev->bam.hdl);
}

static void msm_slim_prg_slew(struct platform_device *pdev,
				struct msm_slim_ctrl *dev)
{
	struct resource *slew_io;
	void __iomem *slew_reg;
	/* SLEW RATE register for this slimbus */
	dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"slimbus_slew_reg");
	if (!dev->slew_mem) {
		dev_dbg(&pdev->dev, "no slimbus slew resource\n");
		return;
	}
	slew_io = request_mem_region(dev->slew_mem->start,
				resource_size(dev->slew_mem), pdev->name);
	if (!slew_io) {
		dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
		dev->slew_mem = NULL;
		return;
	}

	slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
	if (!slew_reg) {
		dev_dbg(dev->dev, "slew register mapping failed");
		release_mem_region(dev->slew_mem->start,
					resource_size(dev->slew_mem));
		dev->slew_mem = NULL;
		return;
	}
	writel_relaxed(1, slew_reg);
	/* Make sure slimbus-slew rate enabling goes through */
	wmb();
	iounmap(slew_reg);
}

static int __devinit msm_slim_probe(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev;
	int ret;
	struct resource *bam_mem, *bam_io;
	struct resource *slim_mem, *slim_io;
	struct resource *irq, *bam_irq;
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"slimbus_physical");
	if (!slim_mem) {
		dev_err(&pdev->dev, "no slimbus physical memory resource\n");
		return -ENODEV;
	}
	slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
					pdev->name);
	if (!slim_io) {
		dev_err(&pdev->dev, "slimbus memory already claimed\n");
		return -EBUSY;
	}

	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"slimbus_bam_physical");
	if (!bam_mem) {
		dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
		ret = -ENODEV;
		goto err_get_res_bam_failed;
	}
	bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
					pdev->name);
	if (!bam_io) {
		release_mem_region(slim_mem->start, resource_size(slim_mem));
		dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
		ret = -EBUSY;
		goto err_get_res_bam_failed;
	}
	irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						"slimbus_irq");
	if (!irq) {
		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
		ret = -ENODEV;
		goto err_get_res_failed;
	}
	bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						"slimbus_bam_irq");
	if (!bam_irq) {
		dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
		ret = -ENODEV;
		goto err_get_res_failed;
	}

	dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
		ret = -ENOMEM;
		goto err_get_res_failed;
	}
	dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	slim_set_ctrldata(&dev->ctrl, dev);
	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
	if (!dev->base) {
		dev_err(&pdev->dev, "IOremap failed\n");
		ret = -ENOMEM;
		goto err_ioremap_failed;
	}
	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
	if (!dev->bam.base) {
		dev_err(&pdev->dev, "BAM IOremap failed\n");
		ret = -ENOMEM;
		goto err_ioremap_bam_failed;
	}
	dev->ctrl.nr = pdev->id;
	dev->ctrl.nchans = MSM_SLIM_NCHANS;
	dev->ctrl.nports = MSM_SLIM_NPORTS;
	dev->ctrl.set_laddr = msm_set_laddr;
	dev->ctrl.xfer_msg = msm_xfer_msg;
	dev->ctrl.wakeup = msm_clk_pause_wakeup;
	dev->ctrl.config_port = msm_config_port;
	dev->ctrl.port_xfer = msm_slim_port_xfer;
	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
	/* Reserve some messaging BW for satellite-apps driver communication */
	dev->ctrl.sched.pending_msgsl = 30;

	init_completion(&dev->reconf);
	mutex_init(&dev->tx_lock);
	spin_lock_init(&dev->rx_lock);
	dev->ee = 1;
	dev->use_rx_msgqs = 1;
	dev->irq = irq->start;
	dev->bam.irq = bam_irq->start;

	ret = msm_slim_sps_init(dev, bam_mem);
	if (ret != 0) {
		dev_err(dev->dev, "error SPS init\n");
		goto err_sps_init_failed;
	}

	dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
	if (IS_ERR(dev->rclk)) {
		/* clk_get() returns an ERR_PTR(), not NULL, on failure */
		dev_err(dev->dev, "slimbus clock not found");
		ret = PTR_ERR(dev->rclk);
		goto err_clk_get_failed;
	}
	dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
	dev->framer.superfreq =
		dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
	dev->ctrl.a_framer = &dev->framer;
	dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
	dev->ctrl.dev.parent = &pdev->dev;

	ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
				"msm_slim_irq", dev);
	if (ret) {
		dev_err(&pdev->dev, "request IRQ failed\n");
		goto err_request_irq_failed;
	}

	dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
	if (!dev->satd) {
		ret = -ENOMEM;
		goto err_sat_failed;
	}

	msm_slim_prg_slew(pdev, dev);
	clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
	clk_enable(dev->rclk);

	dev->satd->dev = dev;
	dev->satd->satcl.name = "msm_sat_dev";
	spin_lock_init(&dev->satd->lock);
	INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
	dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
	/* Component register initialization */
	writel_relaxed(1, dev->base + COMP_CFG);
	writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
				dev->base + COMP_TRUST_CFG);

	/*
	 * Manager register initialization
	 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
	 */
	if (dev->use_rx_msgqs)
		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
			MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
	else
		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
			MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
	writel_relaxed(1, dev->base + MGR_CFG);
	/*
	 * Framer registers are beyond 1K memory region after Manager and/or
	 * component registers. Make sure those writes are ordered
	 * before framer register writes
	 */
	wmb();

	/* Register with framework before enabling framer, clock */
1768 ret = slim_add_numbered_controller(&dev->ctrl);
1769 if (ret) {
1770 dev_err(dev->dev, "error adding controller\n");
1771 goto err_ctrl_failed;
1772 }
1773
	/* Framer register initialization */
	writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
		(1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
		dev->base + FRM_CFG);
	/*
	 * Make sure that framer wake-up and enabling writes go through
	 * before any other component is enabled. Framer is responsible for
	 * clocking the bus and enabling framer first will ensure that other
	 * devices can report presence when they are enabled
	 */
	mb();

	/* Enable RX msg Q */
	if (dev->use_rx_msgqs)
		writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
					dev->base + MGR_CFG);
	else
		writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
	/*
	 * Make sure that manager-enable is written through before interface
	 * device is enabled
	 */
	mb();
	writel_relaxed(1, dev->base + INTF_CFG);
	/*
	 * Make sure that interface-enable is written through before enabling
	 * ported generic device inside MSM manager
	 */
	mb();
	writel_relaxed(1, dev->base + PGD_CFG);
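	/*
	 * 0x3F << 17 presumably marks the block of ports as owned by this
	 * execution environment (dev->ee); the exact field layout is
	 * defined by the PGD_OWN_EEn register spec.
	 */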
	writel_relaxed(0x3F << 17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
	/*
	 * Make sure that ported generic device is enabled and port-EE
	 * settings are written through before finally enabling the component
	 */
	mb();

	writel_relaxed(1, dev->base + COMP_CFG);
	/*
	 * Make sure that all writes have gone through before exiting this
	 * function
	 */
	mb();
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
	pm_runtime_set_active(&pdev->dev);

	dev_dbg(dev->dev, "MSM SB controller is up!\n");
	return 0;

err_ctrl_failed:
	writel_relaxed(0, dev->base + COMP_CFG);
	destroy_workqueue(dev->satd->wq);
	kfree(dev->satd);
	clk_disable(dev->rclk);
err_sat_failed:
	free_irq(dev->irq, dev);
err_request_irq_failed:
	clk_put(dev->rclk);
err_clk_get_failed:
	msm_slim_sps_exit(dev);
err_sps_init_failed:
	iounmap(dev->bam.base);
err_ioremap_bam_failed:
	iounmap(dev->base);
err_ioremap_failed:
	kfree(dev);
err_get_res_failed:
	release_mem_region(bam_mem->start, resource_size(bam_mem));
err_get_res_bam_failed:
	release_mem_region(slim_mem->start, resource_size(slim_mem));
	return ret;
}

static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	struct resource *slew_mem = dev->slew_mem;
	struct msm_slim_sat *sat = dev->satd;

	slim_remove_device(&sat->satcl);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	kfree(sat->satch);
	destroy_workqueue(sat->wq);
	kfree(sat);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
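	/*
	 * Returning -EAGAIN signals the PM core not to carry out a
	 * synchronous suspend from the idle callback; the autosuspend
	 * request queued above triggers runtime suspend once the
	 * autosuspend delay expires.
	 */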
	return -EAGAIN;
}
#endif

/*
 * If PM_RUNTIME is not defined, these two functions become helpers
 * called from system suspend/resume, so they are not placed inside
 * #ifdef CONFIG_PM_RUNTIME.
 */
static int msm_slim_runtime_suspend(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	int ret;

	dev_dbg(device, "pm_runtime: suspending...\n");
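	/*
	 * MSM_CTRL_SLEEPING marks the window while the clock-pause
	 * sequence is in flight; it settles to ASLEEP on success or back
	 * to AWAKE if the bus could not be paused.
	 */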
	dev->state = MSM_CTRL_SLEEPING;
	ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret)
		dev->state = MSM_CTRL_AWAKE;
	else
		dev->state = MSM_CTRL_ASLEEP;
	return ret;
}

static int msm_slim_runtime_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
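	/* Un-pause the bus clock only if it was actually paused at suspend */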
	if (dev->state == MSM_CTRL_ASLEEP)
		ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
	if (ret)
		dev->state = MSM_CTRL_ASLEEP;
	else
		dev->state = MSM_CTRL_AWAKE;
	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int msm_slim_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
		dev_dbg(dev, "system suspend");
		ret = msm_slim_runtime_suspend(dev);
	}
	if (ret == -EBUSY) {
		/*
		 * If the clock pause failed due to active channels, some
		 * audio stream may be active during suspend. We don't want
		 * to return a suspend failure in that case, so that display
		 * and other components can still suspend. Any other error
		 * is passed on to system-level suspend.
		 */
		ret = 0;
	}
	return ret;
}

static int msm_slim_resume(struct device *dev)
{
	/* If runtime PM is enabled, this resume shouldn't do anything */
	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
		int ret;

		dev_dbg(dev, "system resume");
		ret = msm_slim_runtime_resume(dev);
		if (!ret) {
			pm_runtime_mark_last_busy(dev);
			pm_request_autosuspend(dev);
		}
		return ret;
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

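/*
 * SET_SYSTEM_SLEEP_PM_OPS and SET_RUNTIME_PM_OPS expand to nothing when
 * CONFIG_PM_SLEEP / CONFIG_PM_RUNTIME are disabled, so the references
 * below stay consistent with the #ifdef guards on the callbacks above.
 */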
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};

static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = __devexit_p(msm_slim_remove),
	.driver = {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};

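/*
 * Registered at subsys_initcall rather than module_init so the bus
 * controller is available before client (e.g. codec) drivers probe.
 */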
static int __init msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);

static void __exit msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");