blob: 2aab31ef82b8e5c7324261a8c31fe7c9d71afdaf [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <mach/sps.h>
26
27/* Per spec.max 40 bytes per received message */
28#define SLIM_RX_MSGQ_BUF_LEN 40
29
30#define SLIM_USR_MC_GENERIC_ACK 0x25
31#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
32#define SLIM_USR_MC_REPORT_SATELLITE 0x1
33#define SLIM_USR_MC_ADDR_QUERY 0xD
34#define SLIM_USR_MC_ADDR_REPLY 0xE
35#define SLIM_USR_MC_DEFINE_CHAN 0x20
36#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
37#define SLIM_USR_MC_CHAN_CTRL 0x23
38#define SLIM_USR_MC_RECONFIG_NOW 0x24
39#define SLIM_USR_MC_REQ_BW 0x28
40#define SLIM_USR_MC_CONNECT_SRC 0x2C
41#define SLIM_USR_MC_CONNECT_SINK 0x2D
42#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
43
44/* MSM Slimbus peripheral settings */
45#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
46#define MSM_SLIM_NCHANS 32
47#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060048#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049
50/*
51 * Need enough descriptors to receive present messages from slaves
52 * if received simultaneously. Present message needs 3 descriptors
53 * and this size will ensure around 10 simultaneous reports.
54 */
55#define MSM_SLIM_DESC_NUM 32
56
57#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
58 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
59
60#define MSM_SLIM_NAME "msm_slim_ctrl"
61#define SLIM_ROOT_FREQ 24576000
62
63#define MSM_CONCUR_MSG 8
64#define SAT_CONCUR_MSG 8
65#define DEF_WATERMARK (8 << 1)
66#define DEF_ALIGN 0
67#define DEF_PACK (1 << 6)
68#define ENABLE_PORT 1
69
70#define DEF_BLKSZ 0
71#define DEF_TRANSZ 0
72
73#define SAT_MAGIC_LSB 0xD9
74#define SAT_MAGIC_MSB 0xC5
75#define SAT_MSG_VER 0x1
76#define SAT_MSG_PROT 0x1
77#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060078#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060079#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080
81#define QC_MFGID_LSB 0x2
82#define QC_MFGID_MSB 0x17
83#define QC_CHIPID_SL 0x10
84#define QC_DEVID_SAT1 0x3
85#define QC_DEVID_SAT2 0x4
86#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060087#define QC_MSM_DEVS 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088
89/* Component registers */
/* COMP block register offsets, relative to the controller's register base */
enum comp_reg {
	COMP_CFG = 0,		/* component configuration */
	COMP_TRUST_CFG = 0x14,	/* trusted-EE configuration */
};
94
95/* Manager registers */
/* Manager (MGR) block register offsets */
enum mgr_reg {
	MGR_CFG = 0x200,	/* manager enable / message-queue enables */
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,	/* interrupt enable mask */
	MGR_INT_STAT = 0x214,	/* interrupt status (read in ISR) */
	MGR_INT_CLR = 0x218,	/* write-1-to-clear interrupt bits */
	MGR_TX_MSG = 0x230,	/* TX message FIFO window */
	MGR_RX_MSG = 0x270,	/* RX message FIFO window */
	MGR_VE_STAT = 0x300,
};
107
/* Bit values for the MGR_CFG register */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
114/* Message queue types */
/* Message queue types (indexes for the BAM-backed message queues) */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
120/* Framer registers */
/* Framer (FRM) block register offsets */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,	/* written to wake framer out of clock pause */
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};
132
133/* Interface registers */
/* Interface (INTF) block register offsets */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};
143
144/* Manager PGD registers */
/*
 * Manager PGD (ported generic device) register offsets.
 * The *_EEn registers are banked per execution environment (stride 16 bytes,
 * indexed by dev->ee); the *n registers are banked per port (stride 32 bytes).
 */
enum pgd_reg {
	PGD_CFG = 0x1000,
	PGD_STAT = 0x1004,
	PGD_INT_EN = 0x1010,
	PGD_INT_STAT = 0x1014,
	PGD_INT_CLR = 0x1018,
	PGD_OWN_EEn = 0x1020,
	PGD_PORT_INT_EN_EEn = 0x1030,	/* per-EE port interrupt enable */
	PGD_PORT_INT_ST_EEn = 0x1034,	/* per-EE port interrupt status */
	PGD_PORT_INT_CL_EEn = 0x1038,	/* per-EE port interrupt clear */
	PGD_PORT_CFGn = 0x1080,
	PGD_PORT_STATn = 0x1084,
	PGD_PORT_PARAMn = 0x1088,
	PGD_PORT_BLKn = 0x108C,
	PGD_PORT_TRANn = 0x1090,
	PGD_PORT_MCHANn = 0x1094,
	PGD_PORT_PSHPLLn = 0x1098,
	PGD_PORT_PC_CFGn = 0x1600,
	PGD_PORT_PC_VALn = 0x1604,
	PGD_PORT_PC_VFR_TSn = 0x1608,
	PGD_PORT_PC_VFR_STn = 0x160C,
	PGD_PORT_PC_VFR_CLn = 0x1610,
	PGD_IE_STAT = 0x1700,
	PGD_VE_STAT = 0x1710,
};
170
/* Resource-group field values used when assigning ports/resources to EEs */
enum rsc_grp {
	EE_MGR_RSC_GRP = 1 << 10,
	EE_NGD_2 = 2 << 6,
	EE_NGD_1 = 0,
};
176
/* Bit values for MGR_INT_STAT / MGR_INT_EN / MGR_INT_CLR */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,	/* reconfiguration sequence completed */
	MGR_INT_TX_NACKED_2 = 1 << 25,	/* TX message NACKed by the bus */
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,	/* RX message available in MGR_RX_MSG */
	MGR_INT_TX_MSG_SENT = 1 << 31,	/* TX message transmitted */
};
184
/* Framer configuration field shifts/values (FRM_CFG) */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
};
191
/*
 * Controller power state, driven by runtime/system PM:
 * AWAKE    - bus active, transactions allowed
 * SLEEPING - suspend in progress (only clock-pause traffic allowed)
 * ASLEEP   - clock paused, no traffic until wakeup
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
197
/* Handle/state for the SPS BAM that backs SLIMBus data pipes */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM device handle returned by SPS driver */
	void __iomem *base;	/* mapped BAM register space */
	int irq;		/* BAM interrupt line */
};
203
/* One SPS endpoint (pipe) used either for a data port or a message queue */
struct msm_slim_endp {
	struct sps_pipe *sps;		/* allocated SPS endpoint, NULL if freed */
	struct sps_connect config;	/* SPS connection parameters */
	struct sps_register_event event;
	struct sps_mem_buffer buf;	/* DMA buffer backing this endpoint */
	struct completion *xcomp;	/* transfer-done completion, if any */
	bool connected;			/* true after successful sps_connect() */
};
212
/* Per-instance state of the MSM SLIMBus controller */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* embedded framework controller */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* mapped controller register space */
	struct resource *slew_mem;	/* slew-rate control region (platform) */
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single TX message assembly buffer */
	/* circular RX queue: fixed-size slots, head==tail means empty */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t rx_lock;		/* protects rx_msgs/head/tail */
	int head;			/* RX queue consumer index */
	int tail;			/* RX queue producer index */
	int irq;
	int err;			/* status of last TX (set by ISR) */
	int ee;				/* this host's execution environment id */
	struct completion *wr_comp;	/* completed by ISR on TX done/NACK */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];	/* known satellites */
	struct msm_slim_endp pipes[7];	/* data-port endpoints */
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;	/* BAM-backed RX message queue */
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;		/* root clock, gated during clock pause */
	struct mutex tx_lock;		/* serializes TX transactions */
	u8 pgdla;			/* logical address of the PGD */
	bool use_rx_msgqs;		/* BAM message queue vs. register FIFO */
	int pipe_b;			/* first HW port number backing pipes[0] */
	struct completion reconf;	/* completed on RECFG_DONE interrupt */
	bool reconf_busy;		/* reconfiguration sequence in flight */
	bool chan_active;		/* holds a PM vote for data channels */
	enum msm_ctrl_state state;	/* runtime/system PM state */
	int nsats;			/* number of valid entries in satd[] */
};
247
/* Book-keeping for one data channel owned by a satellite */
struct msm_sat_chan {
	u8 chan;	/* channel number on the bus */
	u16 chanh;	/* framework channel handle */
	int req_rem;	/* pending remove requests for this channel */
	int req_def;	/* pending define/activate requests */
};
254
/* State for one satellite device driven through this controller */
struct msm_slim_sat {
	struct slim_device satcl;	/* embedded framework client */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* processes queued satellite messages */
	struct work_struct wd;
	/* circular message queue filled from the ISR, drained by the work item */
	u8 sat_msgs[SAT_CONCUR_MSG][40];
	struct msm_sat_chan *satch;	/* array of channels, nsatch valid */
	u8 nsatch;
	bool sent_capability;		/* master-capability msg already sent */
	bool pending_reconf;
	bool pending_capability;
	int shead;			/* sat_msgs consumer index */
	int stail;			/* sat_msgs producer index */
	spinlock_t lock;		/* protects sat_msgs/shead/stail */
};
270
Sagar Dharia790cfd02011-09-25 17:56:24 -0600271static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
272
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700273static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
274{
275 spin_lock(&dev->rx_lock);
276 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
277 spin_unlock(&dev->rx_lock);
278 dev_err(dev->dev, "RX QUEUE full!");
279 return -EXFULL;
280 }
281 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
282 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
283 spin_unlock(&dev->rx_lock);
284 return 0;
285}
286
287static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
288{
289 unsigned long flags;
290 spin_lock_irqsave(&dev->rx_lock, flags);
291 if (dev->tail == dev->head) {
292 spin_unlock_irqrestore(&dev->rx_lock, flags);
293 return -ENODATA;
294 }
295 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
296 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
297 spin_unlock_irqrestore(&dev->rx_lock, flags);
298 return 0;
299}
300
301static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
302{
303 struct msm_slim_ctrl *dev = sat->dev;
304 spin_lock(&sat->lock);
305 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
306 spin_unlock(&sat->lock);
307 dev_err(dev->dev, "SAT QUEUE full!");
308 return -EXFULL;
309 }
310 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
311 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
312 spin_unlock(&sat->lock);
313 return 0;
314}
315
316static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
317{
318 unsigned long flags;
319 spin_lock_irqsave(&sat->lock, flags);
320 if (sat->stail == sat->shead) {
321 spin_unlock_irqrestore(&sat->lock, flags);
322 return -ENODATA;
323 }
324 memcpy(buf, sat->sat_msgs[sat->shead], 40);
325 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
326 spin_unlock_irqrestore(&sat->lock, flags);
327 return 0;
328}
329
330static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
331{
332 e_addr[0] = (buffer[1] >> 24) & 0xff;
333 e_addr[1] = (buffer[1] >> 16) & 0xff;
334 e_addr[2] = (buffer[1] >> 8) & 0xff;
335 e_addr[3] = buffer[1] & 0xff;
336 e_addr[4] = (buffer[0] >> 24) & 0xff;
337 e_addr[5] = (buffer[0] >> 16) & 0xff;
338}
339
340static bool msm_is_sat_dev(u8 *e_addr)
341{
342 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
343 e_addr[2] != QC_CHIPID_SL &&
344 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
345 return true;
346 return false;
347}
348
/*
 * Take a runtime-PM vote to keep the controller active.
 * Returns the pm_runtime_get_sync() result (>= 0 on success). If the
 * usage count is not positive after a successful get (inconsistent PM
 * state), or runtime PM is compiled out, returns -ENODEV.
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			/* get succeeded but count <= 0: state is corrupt */
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl(). Marks the device
 * busy first so autosuspend timing restarts; refuses (and logs) if the
 * usage count is already non-positive. No-op if runtime PM is disabled.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
378
Sagar Dharia790cfd02011-09-25 17:56:24 -0600379static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
380{
381 struct msm_slim_sat *sat = NULL;
382 int i = 0;
383 while (!sat && i < dev->nsats) {
384 if (laddr == dev->satd[i]->satcl.laddr)
385 sat = dev->satd[i];
386 i++;
387 }
388 return sat;
389}
390
/*
 * Main controller ISR. Handles, in order:
 *  - TX completion / NACK (completes dev->wr_comp, sets dev->err on NACK)
 *  - RX message: routed by message type to a satellite queue, the
 *    controller RX queue (report-present / replies), or logged
 *  - reconfiguration-done (completes dev->reconf)
 *  - per-port status interrupts (disconnect/overflow/underflow)
 * Every interrupt-clear register write is followed by mb() so the clear
 * is posted before completions are signalled or the ISR exits.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: record error so the waiting sender sees it */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* first word carries length (5 LSBs), MT and MC fields */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user messages go to the owning satellite's queue */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/*
			 * NOTE(review): e_addr is extracted here but not used
			 * in this branch anymore; the full message is queued
			 * for the rx_msgq thread instead.
			 */
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element and bitmask */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* per-port interrupt status for this execution environment */
	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(dev->base +
						PGD_PORT_STATn + (i * 32));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
				(dev->ee * 16));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
545
546static int
547msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
548{
549 int ret;
550 struct sps_pipe *endpoint;
551 struct sps_connect *config = &ep->config;
552
553 /* Allocate the endpoint */
554 endpoint = sps_alloc_endpoint();
555 if (!endpoint) {
556 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
557 return -ENOMEM;
558 }
559
560 /* Get default connection configuration for an endpoint */
561 ret = sps_get_config(endpoint, config);
562 if (ret) {
563 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
564 goto sps_config_failed;
565 }
566
567 ep->sps = endpoint;
568 return 0;
569
570sps_config_failed:
571 sps_free_endpoint(endpoint);
572 return ret;
573}
574
575static void
576msm_slim_free_endpoint(struct msm_slim_endp *ep)
577{
578 sps_free_endpoint(ep->sps);
579 ep->sps = NULL;
580}
581
582static int msm_slim_sps_mem_alloc(
583 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
584{
585 dma_addr_t phys;
586
587 mem->size = len;
588 mem->min_size = 0;
589 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
590
591 if (!mem->base) {
592 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
593 return -ENOMEM;
594 }
595
596 mem->phys_base = phys;
597 memset(mem->base, 0x00, mem->size);
598 return 0;
599}
600
601static void
602msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
603{
604 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
605 mem->size = 0;
606 mem->base = NULL;
607 mem->phys_base = 0;
608}
609
/*
 * Program hardware port pn with the default watermark/align/pack
 * configuration, default block/transfer sizes, and enable its interrupt
 * in this EE's port-interrupt-enable register.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve interrupt enables of other ports */
	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
					(dev->ee * 16));
	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
			(dev->ee * 16));
	/* Make sure that port registers are updated before returning */
	mb();
}
623
/*
 * Connect logical port pn's SPS pipe to the BAM, configuring transfer
 * direction from the framework's flow setting (SLIM_SRC: memory -> BAM,
 * otherwise BAM -> memory). The BAM pipe index is read back from the
 * hardware port status register. On success marks the pipe connected and
 * programs the hardware port. Returns 0 or an SPS error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already-connected pipe: just refresh its options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
					ret);
			return ret;
		}
	}

	/* bits [11:4] of the port status hold the BAM pipe index */
	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
				(32 * (pn + dev->pipe_b)));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
675
/*
 * Return the controller's single TX assembly buffer. Callers must hold
 * dev->tx_lock; len is currently unused because only one transaction is
 * in flight at a time.
 */
static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}
685
686static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
687{
688 int i;
689 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
690 for (i = 0; i < (len + 3) >> 2; i++) {
691 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
692 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
693 }
694 /* Guarantee that message is sent before returning */
695 mb();
696 return 0;
697}
698
/*
 * Send one SLIMBus transaction and wait (1s timeout) for its TX
 * completion from the ISR. Responsibilities, in order:
 *  - runtime-PM voting: a messaging "get" for every non-clock-pause
 *    transaction, plus a long-lived data-channel vote taken at
 *    BEGIN_RECONFIGURATION when channels are scheduled and released at
 *    RECONFIGURE_NOW when no slots remain used;
 *  - rejecting transactions while suspended (-EBUSY) and enumeration
 *    addressing (-EPROTONOSUPPORT);
 *  - assembling the message header/payload into the shared TX buffer;
 *  - for master-side CONNECT_SOURCE/SINK: connecting the SPS pipe first;
 *    for DISCONNECT_PORT: tearing the pipe down locally and returning
 *    without sending anything on the bus;
 *  - for the clock-pause RECONFIGURE_NOW: waiting for reconfiguration
 *    done, then gating the root clock and disabling the IRQ.
 * Returns 0/dev->err on success, negative errno otherwise.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* result of the messaging PM vote; -1 = no vote */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* la == 0xFF on port messages means "this controller's PGD" */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* value/information-element messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* translate logical port number to hardware port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause variant: wait for reconfig-done, then gate */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			/* no messaging vote was taken for clock pause */
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* release the data-channel vote once nothing is scheduled */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
855
/*
 * Assign logical address laddr to the device with enumeration address ea
 * by sending an ASSIGN_LOGICAL_ADDRESS message (9 bytes) and waiting up
 * to 1s for TX completion. Returns dev->err (set by the ISR) on
 * completion, -ETIMEDOUT otherwise.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	/* destination field carries the low 2 bytes of the EA */
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
878
/*
 * Bring the bus out of clock pause: re-enable the IRQ and root clock
 * (both disabled when clock pause completed in msm_xfer_msg), then toggle
 * the framer wakeup register. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
899
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700900static int msm_config_port(struct slim_controller *ctrl, u8 pn)
901{
902 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
903 struct msm_slim_endp *endpoint;
904 int ret = 0;
905 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
906 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
907 return -EPROTONOSUPPORT;
908 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
909 return -ENODEV;
910
911 endpoint = &dev->pipes[pn];
912 ret = msm_slim_init_endpoint(dev, endpoint);
913 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
914 return ret;
915}
916
917static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
918 u8 pn, u8 **done_buf, u32 *done_len)
919{
920 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
921 struct sps_iovec sio;
922 int ret;
923 if (done_len)
924 *done_len = 0;
925 if (done_buf)
926 *done_buf = NULL;
927 if (!dev->pipes[pn].connected)
928 return SLIM_P_DISCONNECT;
929 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
930 if (!ret) {
931 if (done_len)
932 *done_len = sio.size;
933 if (done_buf)
934 *done_buf = (u8 *)sio.addr;
935 }
936 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
937 return SLIM_P_INPROGRESS;
938}
939
/*
 * Queue a single-descriptor data transfer on port @pn and arrange for
 * @comp to be completed when the descriptor finishes or errors out.
 *
 * Returns -ENODEV for pipe numbers below 7 offsets (the first 7 pipes are
 * owned by the message queues -- see the pipe-mask setup comment in
 * msm_slim_sps_init()), otherwise the sps error code (0 on success).
 *
 * NOTE(review): sreg is a stack variable and only five of its fields are
 * assigned before sps_register_event(); presumably the remaining fields
 * are ignored for this option set -- confirm against the sps API.
 */
static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;


	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	/* One descriptor, interrupt on completion; iobuf must be DMA-able */
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	/* NOTE(review): logged even on success (ret == 0) */
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}
967
/*
 * msm_sat_define_ch() - handle a satellite channel define/control request.
 * @sat: satellite context the request came from
 * @buf: raw request message (wire format, see bit extraction below)
 * @len: message length; callers mask this to 5 bits (see slim_sat_rxprocess
 *       and msm_slim_rxwq), so len <= 31 and chh[40] cannot overflow
 * @mc:  message code (SLIM_USR_MC_CHAN_CTRL, _DEFINE_CHAN or _DEF_ACT_CHAN)
 *
 * For SLIM_USR_MC_CHAN_CTRL: look up the channel named in buf[5], apply the
 * requested operation, and count pending remove/define requests per channel
 * so they can be committed or rolled back at RECONFIG_NOW time.
 *
 * For define/define-activate: resolve or allocate a satellite channel slot
 * per channel number in buf[8..len-1], decode the channel properties from
 * the message, define the channel (as a group when more than one), and
 * optionally activate it.
 *
 * Returns 0 on success or a negative errno (-ENOTCONN unknown channel,
 * -EINVAL short message, -EXFULL channel table full, or an error from the
 * slimbus core).
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Find the satellite channel entry for channel buf[5] */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		/* Operation code is in the top two bits of buf[3] */
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/*
			 * Book-keep every channel listed in the message so
			 * RECONFIG_NOW can dealloc/rollback appropriately.
			 */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		/* Resolve each requested channel number to a channel handle */
		for (i = 8; i < len; i++) {
			int j = 0;
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				/* Already known: re-query to bump core state */
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			/* New channel: j == sat->nsatch, claim the next slot */
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Decode channel properties from the message bit-fields */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields only even
		 * values (0/2/4/6); a 2-bit field would normally use >> 6 --
		 * confirm against the protocol spec before changing.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i == len here, so (i > 9) means more than one channel */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
						&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
						chh[0],
						SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1067
/*
 * msm_slim_rxwq() - dispatch one received slimbus message from the Rx queue.
 *
 * Dequeues a single message and routes it by message type/code:
 *  - REPORT_PRESENT: assign a logical address; remember the Qualcomm PGD
 *    device address; enable runtime PM once the last expected MSM device
 *    appears; hand satellite devices off to their workqueue.
 *  - REPLY_INFORMATION / REPLY_VALUE: forward the payload to the slimbus
 *    core transaction layer.
 *  - REPORT_INFORMATION: log the reported information element and bitmask.
 *  - anything else: log the raw message as an error.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Header byte 0: 5-bit length, 3-bit message type */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/*
			 * Enable runtime PM only after the last expected MSM
			 * device has reported present.
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				/* Satellite messages are handled on its wq */
				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* buf[3] is the transaction id; payload starts at 4 */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			/* 12-bit information element: buf[4] high, buf[3] low */
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1133
/*
 * slim_sat_rxprocess() - workqueue handler that drains a satellite's
 * message queue and services each request.
 *
 * For every dequeued message this builds a response transaction (txn),
 * takes/releases runtime-PM votes around satellite activity via
 * msm_slim_get_ctrl()/msm_slim_put_ctrl(), and dispatches on the message
 * code: capability exchange, address query, channel define/control,
 * reconfiguration, bandwidth request, and port connect/disconnect.
 * Requests that expect an acknowledgment (gen_ack) are answered with a
 * SLIM_USR_MC_GENERIC_ACK carrying the transaction id and status.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv: result of the runtime-PM vote; < 0 means no vote held */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			/* Hold a PM vote until the satellite acks capability */
			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					enum slim_ch_state chs =
						slim_get_ch_state(&sat->satcl,
							sat->satch[i].chanh);
					pr_err("Slim-SSR, sat:%d, rm chan:%d",
						laddr,
						sat->satch[i].chan);
					if (chs == SLIM_CH_ACTIVE)
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Any other satellite request: vote while servicing */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Look up logical address for the enumeration addr */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* Transaction id position differs per message code */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Keep a PM vote until RECONFIG_NOW commits this */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/*
			 * Commit or roll back the channel bookkeeping done in
			 * msm_sat_define_ch depending on reconfig success.
			 */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem) {
					if (!ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/*
			 * NOTE(review): no break here -- falls through into
			 * the empty default, which is harmless today but
			 * fragile; consider adding an explicit break.
			 */
		default:
			break;
		}
		if (!gen_ack) {
			/* No ack owed: drop the PM vote taken above, if any */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Acknowledge the request: tid + success/failure status */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1358
Sagar Dharia790cfd02011-09-25 17:56:24 -06001359static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1360{
1361 struct msm_slim_sat *sat;
1362 char *name;
1363 if (dev->nsats >= MSM_MAX_NSATS)
1364 return NULL;
1365
1366 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1367 if (!sat) {
1368 dev_err(dev->dev, "no memory for satellite");
1369 return NULL;
1370 }
1371 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1372 if (!name) {
1373 dev_err(dev->dev, "no memory for satellite name");
1374 kfree(sat);
1375 return NULL;
1376 }
1377 dev->satd[dev->nsats] = sat;
1378 sat->dev = dev;
1379 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1380 sat->satcl.name = name;
1381 spin_lock_init(&sat->lock);
1382 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1383 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1384 if (!sat->wq) {
1385 kfree(name);
1386 kfree(sat);
1387 return NULL;
1388 }
1389 /*
1390 * Both sats will be allocated from RX thread and RX thread will
1391 * process messages sequentially. No synchronization necessary
1392 */
1393 dev->nsats++;
1394 return sat;
1395}
1396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001397static void
1398msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1399{
1400 u32 *buf = ev->data.transfer.user;
1401 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1402
1403 /*
1404 * Note the virtual address needs to be offset by the same index
1405 * as the physical address or just pass in the actual virtual address
1406 * if the sps_mem_buffer is not needed. Note that if completion is
1407 * used, the virtual address won't be available and will need to be
1408 * calculated based on the offset of the physical address
1409 */
1410 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1411
1412 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1413
1414 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1415 iovec->addr, iovec->size, iovec->flags);
1416
1417 } else {
1418 dev_err(dev->dev, "%s: unknown event %d\n",
1419 __func__, ev->event_id);
1420 }
1421}
1422
1423static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1424{
1425 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1426 msm_slim_rx_msgq_event(dev, notify);
1427}
1428
1429/* Queue up Rx message buffer */
1430static inline int
1431msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1432{
1433 int ret;
1434 u32 flags = SPS_IOVEC_FLAG_INT;
1435 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1436 struct sps_mem_buffer *mem = &endpoint->buf;
1437 struct sps_pipe *pipe = endpoint->sps;
1438
1439 /* Rx message queue buffers are 4 bytes in length */
1440 u8 *virt_addr = mem->base + (4 * ix);
1441 u32 phys_addr = mem->phys_base + (4 * ix);
1442
1443 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1444
1445 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1446 if (ret)
1447 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1448
1449 return ret;
1450}
1451
/*
 * Pop one completed 4-byte descriptor from the Rx message-queue pipe,
 * copy its payload word into data[offset], and immediately re-post the
 * buffer so the hardware can reuse it.
 *
 * Returns 0 on success or the sps_get_iovec() error code.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	/* A completed descriptor must lie inside our DMA buffer */
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
1485
/*
 * msm_slim_rx_msgq_thread() - kernel thread that reassembles received
 * slimbus messages and routes them.
 *
 * Waits on dev->rx_msgq_notify. In non-msgq mode each wakeup means one
 * full message is already queued, so it just runs msm_slim_rxwq(). In
 * msgq mode it pulls 4-byte words from the BAM pipe one at a time: the
 * first word of a message carries the length/type/code header (and, for
 * user-referred messages, the satellite's logical address); once
 * (index * 4) covers the message length the assembled buffer is handed
 * to the satellite workqueue or to msm_slim_rxwq().
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;		/* next word slot in buffer[] */
	u8 msg_len = 0;		/* length of message being assembled */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: decode header fields */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		} else if ((index * 4) >= msg_len) {
			/* Message complete: dispatch and reset for the next */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1549
1550static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1551{
1552 int i, ret;
1553 u32 pipe_offset;
1554 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1555 struct sps_connect *config = &endpoint->config;
1556 struct sps_mem_buffer *descr = &config->desc;
1557 struct sps_mem_buffer *mem = &endpoint->buf;
1558 struct completion *notify = &dev->rx_msgq_notify;
1559
1560 struct sps_register_event sps_error_event; /* SPS_ERROR */
1561 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1562
Sagar Dharia31ac5812012-01-04 11:38:59 -07001563 init_completion(notify);
1564 if (!dev->use_rx_msgqs)
1565 goto rx_thread_create;
1566
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001567 /* Allocate the endpoint */
1568 ret = msm_slim_init_endpoint(dev, endpoint);
1569 if (ret) {
1570 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1571 goto sps_init_endpoint_failed;
1572 }
1573
1574 /* Get the pipe indices for the message queues */
1575 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1576 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1577
1578 config->mode = SPS_MODE_SRC;
1579 config->source = dev->bam.hdl;
1580 config->destination = SPS_DEV_HANDLE_MEM;
1581 config->src_pipe_index = pipe_offset;
1582 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1583 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1584
1585 /* Allocate memory for the FIFO descriptors */
1586 ret = msm_slim_sps_mem_alloc(dev, descr,
1587 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1588 if (ret) {
1589 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1590 goto alloc_descr_failed;
1591 }
1592
1593 ret = sps_connect(endpoint->sps, config);
1594 if (ret) {
1595 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1596 goto sps_connect_failed;
1597 }
1598
1599 /* Register completion for DESC_DONE */
1600 init_completion(notify);
1601 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1602
1603 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1604 sps_descr_event.options = SPS_O_DESC_DONE;
1605 sps_descr_event.user = (void *)dev;
1606 sps_descr_event.xfer_done = notify;
1607
1608 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1609 if (ret) {
1610 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1611 goto sps_reg_event_failed;
1612 }
1613
1614 /* Register callback for errors */
1615 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1616 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1617 sps_error_event.options = SPS_O_ERROR;
1618 sps_error_event.user = (void *)dev;
1619 sps_error_event.callback = msm_slim_rx_msgq_cb;
1620
1621 ret = sps_register_event(endpoint->sps, &sps_error_event);
1622 if (ret) {
1623 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1624 goto sps_reg_event_failed;
1625 }
1626
1627 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1628 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1629 if (ret) {
1630 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1631 goto alloc_buffer_failed;
1632 }
1633
1634 /*
1635 * Call transfer_one for each 4-byte buffer
1636 * Use (buf->size/4) - 1 for the number of buffer to post
1637 */
1638
1639 /* Setup the transfer */
1640 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1641 ret = msm_slim_post_rx_msgq(dev, i);
1642 if (ret) {
1643 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1644 goto sps_transfer_failed;
1645 }
1646 }
1647
Sagar Dharia31ac5812012-01-04 11:38:59 -07001648rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649 /* Fire up the Rx message queue thread */
1650 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1651 MSM_SLIM_NAME "_rx_msgq_thread");
1652 if (!dev->rx_msgq_thread) {
1653 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001654 /* Tear-down BAMs or return? */
1655 if (!dev->use_rx_msgqs)
1656 return -EIO;
1657 else
1658 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001659 } else
1660 return 0;
1661
1662sps_transfer_failed:
1663 msm_slim_sps_mem_free(dev, mem);
1664alloc_buffer_failed:
1665 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1666 sps_register_event(endpoint->sps, &sps_error_event);
1667sps_reg_event_failed:
1668 sps_disconnect(endpoint->sps);
1669sps_connect_failed:
1670 msm_slim_sps_mem_free(dev, descr);
1671alloc_descr_failed:
1672 msm_slim_free_endpoint(endpoint);
1673sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001674 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001675 return ret;
1676}
1677
1678/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1679static int __devinit
1680msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1681{
1682 int i, ret;
1683 u32 bam_handle;
1684 struct sps_bam_props bam_props = {0};
1685
1686 static struct sps_bam_sec_config_props sec_props = {
1687 .ees = {
1688 [0] = { /* LPASS */
1689 .vmid = 0,
1690 .pipe_mask = 0xFFFF98,
1691 },
1692 [1] = { /* Krait Apps */
1693 .vmid = 1,
1694 .pipe_mask = 0x3F000007,
1695 },
1696 [2] = { /* Modem */
1697 .vmid = 2,
1698 .pipe_mask = 0x00000060,
1699 },
1700 },
1701 };
1702
Sagar Dharia31ac5812012-01-04 11:38:59 -07001703 if (!dev->use_rx_msgqs)
1704 goto init_rx_msgq;
1705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 bam_props.ee = dev->ee;
1707 bam_props.virt_addr = dev->bam.base;
1708 bam_props.phys_addr = bam_mem->start;
1709 bam_props.irq = dev->bam.irq;
1710 bam_props.manage = SPS_BAM_MGR_LOCAL;
1711 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1712
1713 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1714 bam_props.p_sec_config_props = &sec_props;
1715
1716 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1717 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1718
1719 /* First 7 bits are for message Qs */
1720 for (i = 7; i < 32; i++) {
1721 /* Check what pipes are owned by Apps. */
1722 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1723 break;
1724 }
1725 dev->pipe_b = i - 7;
1726
1727 /* Register the BAM device with the SPS driver */
1728 ret = sps_register_bam_device(&bam_props, &bam_handle);
1729 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001730 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1731 dev->use_rx_msgqs = 0;
1732 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001733 }
1734 dev->bam.hdl = bam_handle;
1735 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1736
Sagar Dharia31ac5812012-01-04 11:38:59 -07001737init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001739 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001740 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001741 if (!dev->use_rx_msgqs && bam_handle) {
1742 sps_deregister_bam_device(bam_handle);
1743 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001744 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745 return ret;
1746}
1747
1748static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1749{
1750 if (dev->use_rx_msgqs) {
1751 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1752 struct sps_connect *config = &endpoint->config;
1753 struct sps_mem_buffer *descr = &config->desc;
1754 struct sps_mem_buffer *mem = &endpoint->buf;
1755 struct sps_register_event sps_event;
1756 memset(&sps_event, 0x00, sizeof(sps_event));
1757 msm_slim_sps_mem_free(dev, mem);
1758 sps_register_event(endpoint->sps, &sps_event);
1759 sps_disconnect(endpoint->sps);
1760 msm_slim_sps_mem_free(dev, descr);
1761 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001762 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764}
1765
Sagar Dhariacc969452011-09-19 10:34:30 -06001766static void msm_slim_prg_slew(struct platform_device *pdev,
1767 struct msm_slim_ctrl *dev)
1768{
1769 struct resource *slew_io;
1770 void __iomem *slew_reg;
1771 /* SLEW RATE register for this slimbus */
1772 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1773 "slimbus_slew_reg");
1774 if (!dev->slew_mem) {
1775 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1776 return;
1777 }
1778 slew_io = request_mem_region(dev->slew_mem->start,
1779 resource_size(dev->slew_mem), pdev->name);
1780 if (!slew_io) {
1781 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1782 dev->slew_mem = NULL;
1783 return;
1784 }
1785
1786 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1787 if (!slew_reg) {
1788 dev_dbg(dev->dev, "slew register mapping failed");
1789 release_mem_region(dev->slew_mem->start,
1790 resource_size(dev->slew_mem));
1791 dev->slew_mem = NULL;
1792 return;
1793 }
1794 writel_relaxed(1, slew_reg);
1795 /* Make sure slimbus-slew rate enabling goes through */
1796 wmb();
1797 iounmap(slew_reg);
1798}
1799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800static int __devinit msm_slim_probe(struct platform_device *pdev)
1801{
1802 struct msm_slim_ctrl *dev;
1803 int ret;
1804 struct resource *bam_mem, *bam_io;
1805 struct resource *slim_mem, *slim_io;
1806 struct resource *irq, *bam_irq;
1807 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1808 "slimbus_physical");
1809 if (!slim_mem) {
1810 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1811 return -ENODEV;
1812 }
1813 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1814 pdev->name);
1815 if (!slim_io) {
1816 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1817 return -EBUSY;
1818 }
1819
1820 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1821 "slimbus_bam_physical");
1822 if (!bam_mem) {
1823 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1824 ret = -ENODEV;
1825 goto err_get_res_bam_failed;
1826 }
1827 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1828 pdev->name);
1829 if (!bam_io) {
1830 release_mem_region(slim_mem->start, resource_size(slim_mem));
1831 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1832 ret = -EBUSY;
1833 goto err_get_res_bam_failed;
1834 }
1835 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1836 "slimbus_irq");
1837 if (!irq) {
1838 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1839 ret = -ENODEV;
1840 goto err_get_res_failed;
1841 }
1842 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1843 "slimbus_bam_irq");
1844 if (!bam_irq) {
1845 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1846 ret = -ENODEV;
1847 goto err_get_res_failed;
1848 }
1849
1850 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1851 if (!dev) {
1852 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1853 ret = -ENOMEM;
1854 goto err_get_res_failed;
1855 }
1856 dev->dev = &pdev->dev;
1857 platform_set_drvdata(pdev, dev);
1858 slim_set_ctrldata(&dev->ctrl, dev);
1859 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1860 if (!dev->base) {
1861 dev_err(&pdev->dev, "IOremap failed\n");
1862 ret = -ENOMEM;
1863 goto err_ioremap_failed;
1864 }
1865 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1866 if (!dev->bam.base) {
1867 dev_err(&pdev->dev, "BAM IOremap failed\n");
1868 ret = -ENOMEM;
1869 goto err_ioremap_bam_failed;
1870 }
1871 dev->ctrl.nr = pdev->id;
1872 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1873 dev->ctrl.nports = MSM_SLIM_NPORTS;
1874 dev->ctrl.set_laddr = msm_set_laddr;
1875 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001876 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001877 dev->ctrl.config_port = msm_config_port;
1878 dev->ctrl.port_xfer = msm_slim_port_xfer;
1879 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1880 /* Reserve some messaging BW for satellite-apps driver communication */
1881 dev->ctrl.sched.pending_msgsl = 30;
1882
1883 init_completion(&dev->reconf);
1884 mutex_init(&dev->tx_lock);
1885 spin_lock_init(&dev->rx_lock);
1886 dev->ee = 1;
1887 dev->use_rx_msgqs = 1;
1888 dev->irq = irq->start;
1889 dev->bam.irq = bam_irq->start;
1890
1891 ret = msm_slim_sps_init(dev, bam_mem);
1892 if (ret != 0) {
1893 dev_err(dev->dev, "error SPS init\n");
1894 goto err_sps_init_failed;
1895 }
1896
1897
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001898 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1899 dev->framer.superfreq =
1900 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1901 dev->ctrl.a_framer = &dev->framer;
1902 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001903 dev->ctrl.dev.parent = &pdev->dev;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904
1905 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1906 "msm_slim_irq", dev);
1907 if (ret) {
1908 dev_err(&pdev->dev, "request IRQ failed\n");
1909 goto err_request_irq_failed;
1910 }
1911
Sagar Dhariacc969452011-09-19 10:34:30 -06001912 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001913
1914 /* Register with framework before enabling frame, clock */
1915 ret = slim_add_numbered_controller(&dev->ctrl);
1916 if (ret) {
1917 dev_err(dev->dev, "error adding controller\n");
1918 goto err_ctrl_failed;
1919 }
1920
1921
Tianyi Gou44a81b02012-02-06 17:49:07 -08001922 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001923 if (!dev->rclk) {
1924 dev_err(dev->dev, "slimbus clock not found");
1925 goto err_clk_get_failed;
1926 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001927 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07001928 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06001929
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930 /* Component register initialization */
1931 writel_relaxed(1, dev->base + COMP_CFG);
1932 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
1933 dev->base + COMP_TRUST_CFG);
1934
1935 /*
1936 * Manager register initialization
1937 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
1938 */
1939 if (dev->use_rx_msgqs)
1940 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1941 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
1942 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1943 else
1944 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
1945 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
1946 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
1947 writel_relaxed(1, dev->base + MGR_CFG);
1948 /*
1949 * Framer registers are beyond 1K memory region after Manager and/or
1950 * component registers. Make sure those writes are ordered
1951 * before framer register writes
1952 */
1953 wmb();
1954
1955 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
1957 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
1958 dev->base + FRM_CFG);
1959 /*
1960 * Make sure that framer wake-up and enabling writes go through
1961 * before any other component is enabled. Framer is responsible for
1962 * clocking the bus and enabling framer first will ensure that other
1963 * devices can report presence when they are enabled
1964 */
1965 mb();
1966
1967 /* Enable RX msg Q */
1968 if (dev->use_rx_msgqs)
1969 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
1970 dev->base + MGR_CFG);
1971 else
1972 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
1973 /*
1974 * Make sure that manager-enable is written through before interface
1975 * device is enabled
1976 */
1977 mb();
1978 writel_relaxed(1, dev->base + INTF_CFG);
1979 /*
1980 * Make sure that interface-enable is written through before enabling
1981 * ported generic device inside MSM manager
1982 */
1983 mb();
1984 writel_relaxed(1, dev->base + PGD_CFG);
1985 writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
1986 /*
1987 * Make sure that ported generic device is enabled and port-EE settings
1988 * are written through before finally enabling the component
1989 */
1990 mb();
1991
1992 writel_relaxed(1, dev->base + COMP_CFG);
1993 /*
1994 * Make sure that all writes have gone through before exiting this
1995 * function
1996 */
1997 mb();
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001998 pm_runtime_use_autosuspend(&pdev->dev);
1999 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2000 pm_runtime_set_active(&pdev->dev);
2001
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002002 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2003 return 0;
2004
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002005err_ctrl_failed:
2006 writel_relaxed(0, dev->base + COMP_CFG);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002007err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002008 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 msm_slim_sps_exit(dev);
2011err_sps_init_failed:
2012 iounmap(dev->bam.base);
2013err_ioremap_bam_failed:
2014 iounmap(dev->base);
2015err_ioremap_failed:
2016 kfree(dev);
2017err_get_res_failed:
2018 release_mem_region(bam_mem->start, resource_size(bam_mem));
2019err_get_res_bam_failed:
2020 release_mem_region(slim_mem->start, resource_size(slim_mem));
2021 return ret;
2022}
2023
/*
 * Driver removal: tear down everything msm_slim_probe() set up, in roughly
 * reverse dependency order.
 *
 * Per-satellite cleanup runs first: every allocated channel handle is
 * deallocated, the satellite client is unregistered from the slimbus core,
 * and its channel table, workqueue and name string are freed.  Runtime PM
 * is then disabled, the IRQ released, the controller unregistered, SPS/BAM
 * resources torn down, the RX message-queue thread stopped, MMIO mappings
 * unmapped, and finally the memory regions claimed in probe are released.
 *
 * NOTE(review): probe calls clk_prepare_enable(dev->rclk) but only
 * clk_put() appears here -- confirm the clock is already disabled/
 * unprepared via clock-pause/runtime-suspend before remove, otherwise a
 * clk_disable_unprepare() is missing.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Cache before kfree(dev) below invalidates dev->slew_mem */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		/* Release every channel the satellite still holds */
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	/* Resources are looked up again; guard in case they were absent */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2064
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002065#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: rather than letting the PM core suspend the
 * device synchronously, queue an autosuspend request (honoring the delay
 * configured in probe) and return -EAGAIN to veto the immediate suspend.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
2072#endif
2073
2074/*
2075 * If PM_RUNTIME is not defined, these 2 functions become helper
2076 * functions to be called from system suspend/resume. So they are not
2077 * inside ifdef CONFIG_PM_RUNTIME
2078 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002079#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002080static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081{
2082 struct platform_device *pdev = to_platform_device(device);
2083 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002084 int ret;
2085 dev_dbg(device, "pm_runtime: suspending...\n");
2086 dev->state = MSM_CTRL_SLEEPING;
2087 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002088 if (ret) {
2089 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002090 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002091 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002092 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002093 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002094 return ret;
2095}
2096
2097static int msm_slim_runtime_resume(struct device *device)
2098{
2099 struct platform_device *pdev = to_platform_device(device);
2100 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2101 int ret = 0;
2102 dev_dbg(device, "pm_runtime: resuming...\n");
2103 if (dev->state == MSM_CTRL_ASLEEP)
2104 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002105 if (ret) {
2106 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002107 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002108 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002109 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002110 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002111 return ret;
2112}
2113
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002114static int msm_slim_suspend(struct device *dev)
2115{
2116 int ret = 0;
2117 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2118 dev_dbg(dev, "system suspend");
2119 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06002120 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002121 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002122 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002123 * If the clock pause failed due to active channels, there is
2124 * a possibility that some audio stream is active during suspend
2125 * We dont want to return suspend failure in that case so that
2126 * display and relevant components can still go to suspend.
2127 * If there is some other error, then it should be passed-on
2128 * to system level suspend
2129 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002130 ret = 0;
2131 }
2132 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002133}
2134
/*
 * System-sleep resume.  When runtime PM still owns the device (enabled
 * and runtime-suspended) this is a no-op; otherwise exit clock pause via
 * the shared runtime resume helper and, on success, re-arm autosuspend.
 */
static int msm_slim_resume(struct device *dev)
{
	int ret;

	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	ret = msm_slim_runtime_resume(dev);
	if (!ret) {
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return ret;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002151#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152
/*
 * PM callbacks: system sleep goes through msm_slim_suspend/resume,
 * runtime PM through the msm_slim_runtime_* handlers.
 *
 * NOTE(review): msm_slim_runtime_suspend/resume are compiled under
 * CONFIG_PM_SLEEP, yet SET_RUNTIME_PM_OPS references them whenever
 * CONFIG_PM_RUNTIME is set -- confirm the combination
 * (PM_RUNTIME && !PM_SLEEP) cannot occur for this target, or adjust the
 * #ifdef guards so the handlers are always available to runtime PM.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2164
/* Platform-driver glue; .pm covers both system sleep and runtime PM */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver = {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
	},
};
2174
/*
 * Registered at subsys_initcall level rather than module_init --
 * presumably so the bus controller is up before client drivers at later
 * init levels probe; confirm against the dependent drivers' init levels.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
2180
/* Module unload: unregister the platform driver (triggers remove()) */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
2186
2187MODULE_LICENSE("GPL v2");
2188MODULE_VERSION("0.1");
2189MODULE_DESCRIPTION("MSM Slimbus controller");
2190MODULE_ALIAS("platform:msm-slim");