blob: d7a83124acab0ccb2fae519a8f15b31cdfa05d88 [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
28
29/* Per spec.max 40 bytes per received message */
30#define SLIM_RX_MSGQ_BUF_LEN 40
31
32#define SLIM_USR_MC_GENERIC_ACK 0x25
33#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
34#define SLIM_USR_MC_REPORT_SATELLITE 0x1
35#define SLIM_USR_MC_ADDR_QUERY 0xD
36#define SLIM_USR_MC_ADDR_REPLY 0xE
37#define SLIM_USR_MC_DEFINE_CHAN 0x20
38#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
39#define SLIM_USR_MC_CHAN_CTRL 0x23
40#define SLIM_USR_MC_RECONFIG_NOW 0x24
41#define SLIM_USR_MC_REQ_BW 0x28
42#define SLIM_USR_MC_CONNECT_SRC 0x2C
43#define SLIM_USR_MC_CONNECT_SINK 0x2D
44#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
45
46/* MSM Slimbus peripheral settings */
47#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
48#define MSM_SLIM_NCHANS 32
49#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060050#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051
52/*
53 * Need enough descriptors to receive present messages from slaves
54 * if received simultaneously. Present message needs 3 descriptors
55 * and this size will ensure around 10 simultaneous reports.
56 */
57#define MSM_SLIM_DESC_NUM 32
58
59#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
60 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
61
62#define MSM_SLIM_NAME "msm_slim_ctrl"
63#define SLIM_ROOT_FREQ 24576000
64
65#define MSM_CONCUR_MSG 8
66#define SAT_CONCUR_MSG 8
67#define DEF_WATERMARK (8 << 1)
68#define DEF_ALIGN 0
69#define DEF_PACK (1 << 6)
70#define ENABLE_PORT 1
71
72#define DEF_BLKSZ 0
73#define DEF_TRANSZ 0
74
75#define SAT_MAGIC_LSB 0xD9
76#define SAT_MAGIC_MSB 0xC5
77#define SAT_MSG_VER 0x1
78#define SAT_MSG_PROT 0x1
79#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060080#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060081#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082
83#define QC_MFGID_LSB 0x2
84#define QC_MFGID_MSB 0x17
85#define QC_CHIPID_SL 0x10
86#define QC_DEVID_SAT1 0x3
87#define QC_DEVID_SAT2 0x4
88#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060089#define QC_MSM_DEVS 5
Sagar Dhariaf323f8c2012-09-04 11:27:26 -060090#define INIT_MX_RETRIES 10
91#define DEF_RETRY_MS 10
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092
Sagar Dharia82e516f2012-03-16 16:01:23 -060093#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
94#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
95#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
96
97#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
98#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
99#define CFG_PORT_V2(r) ((r ## _V2))
/* Component registers */
/*
 * Register offsets for controller HW version 2.  These are consumed by the
 * PGD_THIS_EE_V2/PGD_PORT_V2/CFG_PORT_V2 macros above, which stride by
 * 0x1000 per EE / per port.
 */
enum comp_reg_v2 {
	COMP_CFG_V2 = 4,
	COMP_TRUST_CFG_V2 = 0x3000,
};

/* Manager PGD registers */
enum pgd_reg_v2 {
	PGD_CFG_V2 = 0x800,
	PGD_STAT_V2 = 0x804,
	PGD_INT_EN_V2 = 0x810,
	PGD_INT_STAT_V2 = 0x814,
	PGD_INT_CLR_V2 = 0x818,
	PGD_OWN_EEn_V2 = 0x300C,
	PGD_PORT_INT_EN_EEn_V2 = 0x5000,
	PGD_PORT_INT_ST_EEn_V2 = 0x5004,
	PGD_PORT_INT_CL_EEn_V2 = 0x5008,
	PGD_PORT_CFGn_V2 = 0x14000,
	PGD_PORT_STATn_V2 = 0x14004,
	PGD_PORT_PARAMn_V2 = 0x14008,
	PGD_PORT_BLKn_V2 = 0x1400C,
	PGD_PORT_TRANn_V2 = 0x14010,
	PGD_PORT_MCHANn_V2 = 0x14014,
	PGD_PORT_PSHPLLn_V2 = 0x14018,
	PGD_PORT_PC_CFGn_V2 = 0x8000,
	PGD_PORT_PC_VALn_V2 = 0x8004,
	PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
	PGD_PORT_PC_VFR_STn_V2 = 0x800C,
	PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
	PGD_IE_STAT_V2 = 0x820,
	PGD_VE_STAT_V2 = 0x830,
};
132
/*
 * Version-1 register access: EE instances are 16 bytes apart and ports are
 * 32 bytes apart (contrast with the 0x1000 stride of the V2 layout above).
 */
#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
#define CFG_PORT_V1(r) ((r ## _V1))
/* Component registers */
enum comp_reg_v1 {
	COMP_CFG_V1 = 0,
	COMP_TRUST_CFG_V1 = 0x14,
};

/* Manager PGD registers */
enum pgd_reg_v1 {
	PGD_CFG_V1 = 0x1000,
	PGD_STAT_V1 = 0x1004,
	PGD_INT_EN_V1 = 0x1010,
	PGD_INT_STAT_V1 = 0x1014,
	PGD_INT_CLR_V1 = 0x1018,
	PGD_OWN_EEn_V1 = 0x1020,
	PGD_PORT_INT_EN_EEn_V1 = 0x1030,
	PGD_PORT_INT_ST_EEn_V1 = 0x1034,
	PGD_PORT_INT_CL_EEn_V1 = 0x1038,
	PGD_PORT_CFGn_V1 = 0x1080,
	PGD_PORT_STATn_V1 = 0x1084,
	PGD_PORT_PARAMn_V1 = 0x1088,
	PGD_PORT_BLKn_V1 = 0x108C,
	PGD_PORT_TRANn_V1 = 0x1090,
	PGD_PORT_MCHANn_V1 = 0x1094,
	PGD_PORT_PSHPLLn_V1 = 0x1098,
	PGD_PORT_PC_CFGn_V1 = 0x1600,
	PGD_PORT_PC_VALn_V1 = 0x1604,
	PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
	PGD_PORT_PC_VFR_STn_V1 = 0x160C,
	PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
	PGD_IE_STAT_V1 = 0x1700,
	PGD_VE_STAT_V1 = 0x1710,
};
168
/* Manager registers */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,
	MGR_INT_CLR = 0x218,
	MGR_TX_MSG = 0x230,	/* TX message FIFO window, written word-by-word */
	MGR_RX_MSG = 0x270,	/* RX message FIFO window, read word-by-word */
	MGR_IE_STAT = 0x2F0,
	MGR_VE_STAT = 0x300,
};

/* Bits written to MGR_CFG to enable the manager and its message queues */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};

/* Resource-group ownership encodings (presumably for PGD_OWN_EEn; verify) */
enum rsc_grp {
	EE_MGR_RSC_GRP = 1 << 10,
	EE_NGD_2 = 2 << 6,
	EE_NGD_1 = 0,
};

/* MGR_INT_STAT / MGR_INT_CLR bit positions handled by the ISR */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	/* NOTE(review): 1 << 31 overflows signed int; kernel code relies on
	 * the compiler accepting this for enum constants. */
	MGR_INT_TX_MSG_SENT = 1 << 31,
};

/* FRM_CFG field bit offsets */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
};

/*
 * Controller power state:
 *  AWAKE    - fully operational
 *  SLEEPING - suspend in progress; only clock-pause traffic allowed
 *  ASLEEP   - suspended; no traffic allowed (see msm_xfer_msg)
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
245
/* SPS/BAM DMA engine handle and its interrupt resources */
struct msm_slim_sps_bam {
	u32 hdl;
	void __iomem *base;
	int irq;
};

/* One SPS pipe endpoint: its connection config, event, and data buffer */
struct msm_slim_endp {
	struct sps_pipe *sps;
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;
	struct completion *xcomp;
	bool connected;
};

/* Per-controller driver state */
struct msm_slim_ctrl {
	struct slim_controller ctrl;
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;
	struct resource *slew_mem;
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];		/* single TX message buffer (see msm_get_msg_buf) */
	/* RX ring buffer of raw messages, guarded by rx_lock */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t rx_lock;
	int head;		/* RX ring consumer index */
	int tail;		/* RX ring producer index */
	int irq;
	int err;		/* sticky error from last TX/port operation */
	int ee;			/* execution environment index */
	struct completion *wr_comp;	/* signalled by ISR on TX done/NACK */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];
	struct msm_slim_endp pipes[7];
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;
	struct mutex tx_lock;	/* serializes message transmission */
	u8 pgdla;
	bool use_rx_msgqs;
	int pipe_b;		/* offset of first data port backing pipes[] */
	struct completion reconf;	/* signalled on MGR_INT_RECFG_DONE */
	bool reconf_busy;
	bool chan_active;	/* data-channel runtime-PM vote held */
	enum msm_ctrl_state state;
	int nsats;
	u32 ver;		/* HW version selector for V1/V2 register maps */
};

/* Bookkeeping for one channel owned by a satellite */
struct msm_sat_chan {
	u8 chan;
	u16 chanh;
	int req_rem;
	int req_def;
	bool reconf;
};

/* Per-satellite state: message queue drained by a dedicated workqueue */
struct msm_slim_sat {
	struct slim_device satcl;
	struct msm_slim_ctrl *dev;
	struct workqueue_struct *wq;
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40];	/* ring of raw sat messages */
	struct msm_sat_chan *satch;
	u8 nsatch;
	bool sent_capability;
	bool pending_reconf;
	bool pending_capability;
	int shead;		/* sat ring consumer index */
	int stail;		/* sat ring producer index */
	spinlock_t lock;
};

static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
322
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700323static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
324{
325 spin_lock(&dev->rx_lock);
326 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
327 spin_unlock(&dev->rx_lock);
328 dev_err(dev->dev, "RX QUEUE full!");
329 return -EXFULL;
330 }
331 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
332 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
333 spin_unlock(&dev->rx_lock);
334 return 0;
335}
336
337static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
338{
339 unsigned long flags;
340 spin_lock_irqsave(&dev->rx_lock, flags);
341 if (dev->tail == dev->head) {
342 spin_unlock_irqrestore(&dev->rx_lock, flags);
343 return -ENODATA;
344 }
345 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
346 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
347 spin_unlock_irqrestore(&dev->rx_lock, flags);
348 return 0;
349}
350
351static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
352{
353 struct msm_slim_ctrl *dev = sat->dev;
354 spin_lock(&sat->lock);
355 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
356 spin_unlock(&sat->lock);
357 dev_err(dev->dev, "SAT QUEUE full!");
358 return -EXFULL;
359 }
360 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
361 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
362 spin_unlock(&sat->lock);
363 return 0;
364}
365
366static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
367{
368 unsigned long flags;
369 spin_lock_irqsave(&sat->lock, flags);
370 if (sat->stail == sat->shead) {
371 spin_unlock_irqrestore(&sat->lock, flags);
372 return -ENODATA;
373 }
374 memcpy(buf, sat->sat_msgs[sat->shead], 40);
375 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
376 spin_unlock_irqrestore(&sat->lock, flags);
377 return 0;
378}
379
380static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
381{
382 e_addr[0] = (buffer[1] >> 24) & 0xff;
383 e_addr[1] = (buffer[1] >> 16) & 0xff;
384 e_addr[2] = (buffer[1] >> 8) & 0xff;
385 e_addr[3] = buffer[1] & 0xff;
386 e_addr[4] = (buffer[0] >> 24) & 0xff;
387 e_addr[5] = (buffer[0] >> 16) & 0xff;
388}
389
390static bool msm_is_sat_dev(u8 *e_addr)
391{
392 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
393 e_addr[2] != QC_CHIPID_SL &&
394 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
395 return true;
396 return false;
397}
398
/*
 * Take a runtime-PM vote on the controller device.
 * Returns the (non-negative) pm_runtime_get_sync() result on success,
 * -ENODEV if the usage count is unexpectedly non-positive or if runtime PM
 * is not compiled in.  Callers treat a negative return as "no vote taken"
 * and skip the matching msm_slim_put_ctrl().
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		/* sanity-check the usage count taken by get_sync above */
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl().  Marks last-busy so
 * autosuspend timing restarts, and refuses to drop below a zero usage count
 * (logs a mismatch instead of underflowing the reference).
 * No-op when runtime PM is not compiled in.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
428
Sagar Dharia790cfd02011-09-25 17:56:24 -0600429static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
430{
431 struct msm_slim_sat *sat = NULL;
432 int i = 0;
433 while (!sat && i < dev->nsats) {
434 if (laddr == dev->satd[i]->satcl.laddr)
435 sat = dev->satd[i];
436 i++;
437 }
438 return sat;
439}
440
/*
 * Top-level interrupt handler for the SLIMbus manager.
 * Handles, in order: TX done / TX NACK (completes dev->wr_comp), RX message
 * arrival (routes to satellite queues or the controller RX ring),
 * reconfiguration-done, and per-port status interrupts.
 * The writel/mb() pairing after each interrupt-clear write is deliberate:
 * the barrier guarantees the clear reaches the device before completions
 * are signalled or the ISR returns.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: snapshot all status registers for the dump */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);

			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);

			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word carries length (5 LSBs), MT and MC fields */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user-referred messages go to the owning satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log-only: decode source address and element code */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupt status: record errors and clear the bits */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
621
622static int
623msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
624{
625 int ret;
626 struct sps_pipe *endpoint;
627 struct sps_connect *config = &ep->config;
628
629 /* Allocate the endpoint */
630 endpoint = sps_alloc_endpoint();
631 if (!endpoint) {
632 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
633 return -ENOMEM;
634 }
635
636 /* Get default connection configuration for an endpoint */
637 ret = sps_get_config(endpoint, config);
638 if (ret) {
639 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
640 goto sps_config_failed;
641 }
642
643 ep->sps = endpoint;
644 return 0;
645
646sps_config_failed:
647 sps_free_endpoint(endpoint);
648 return ret;
649}
650
/* Release the SPS endpoint owned by @ep and mark it as freed. */
static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
657
658static int msm_slim_sps_mem_alloc(
659 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
660{
661 dma_addr_t phys;
662
663 mem->size = len;
664 mem->min_size = 0;
665 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
666
667 if (!mem->base) {
668 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
669 return -ENOMEM;
670 }
671
672 mem->phys_base = phys;
673 memset(mem->base, 0x00, mem->size);
674 return 0;
675}
676
677static void
678msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
679{
680 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
681 mem->size = 0;
682 mem->base = NULL;
683 mem->phys_base = 0;
684}
685
/*
 * Program hardware port @pn with the default configuration (watermark,
 * alignment, packing, enable), default block/transfer sizes, and enable its
 * interrupt for this EE.  The final mb() ensures all register writes reach
 * the device before the caller proceeds.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write of the per-EE port interrupt enable mask */
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
699
/*
 * Connect logical port @pn to its BAM pipe: fetch and adjust the SPS
 * connection config, derive the BAM pipe index from the hardware port
 * status register, connect the pipe, and enable the hardware port.
 * Returns 0 on success or an sps_* error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already-connected pipe only needs its options refreshed */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* BAM pipe index lives in bits 4..11 of the port status register */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* port sources data onto the bus: memory -> BAM */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* port sinks data from the bus: BAM -> memory */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
751
/*
 * Return the (single) TX message buffer for this controller.  @len is
 * currently unused: the driver serializes transactions under tx_lock, so
 * one fixed buffer suffices.
 */
static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/*
	 * Currently we block a transaction until the current one completes.
	 * In case we need multiple transactions, use message Q
	 */
	return dev->tx_buf;
}
761
762static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
763{
764 int i;
765 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
766 for (i = 0; i < (len + 3) >> 2; i++) {
767 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
768 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
769 }
770 /* Guarantee that message is sent before returning */
771 mb();
772 return 0;
773}
774
/*
 * Transmit one SLIMbus transaction and wait for the ISR to complete it.
 * Handles runtime-PM voting (messaging vote per call, data-channel vote
 * across reconfigurations), assembles the message header/payload in the
 * shared TX buffer, performs the port-connect/disconnect side effects for
 * CONNECT_SOURCE/SINK/DISCONNECT_PORT aimed at this controller, and runs
 * the clock-pause sequence for RECONFIGURE_NOW with the pause flag.
 * Returns 0/dev->err on completion, -EBUSY if suspended, -ETIMEDOUT on
 * TX or reconfiguration timeout, -EPROTONOSUPPORT for enumeration-address
 * destinations.  Serialized by dev->tx_lock.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 means a messaging PM vote is held */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* only clock-pause messages may be sent while going to sleep */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* port messages addressed to 0xFF target this controller's PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload starts after 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* value/information-element transactions carry a 2-byte EC field */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* translate logical port number to hardware port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);
	/* on timeout, detach the on-stack completion before it goes away */
	if (!timeout)
		dev->wr_comp = NULL;
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause sequence: wait for reconfig, then gate clocks */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* last channel torn down: drop the data-channel vote */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	if (!timeout) {
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x",
				txn->mc, txn->mt);
		dev->wr_comp = NULL;
	}

	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	return timeout ? dev->err : -ETIMEDOUT;
}
934
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600935static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
936{
937 int msec_per_frm = 0;
938 int sfr_per_sec;
939 /* Wait for 1 superframe, or default time and then retry */
940 sfr_per_sec = dev->framer.superfreq /
941 (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
942 if (sfr_per_sec)
943 msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
944 if (msec_per_frm < DEF_RETRY_MS)
945 msec_per_frm = DEF_RETRY_MS;
946 msleep(msec_per_frm);
947}
/*
 * msm_set_laddr() - assign a logical address to an enumerated device.
 * @ctrl: slimbus controller.
 * @ea: 6-byte enumeration address of the device.
 * @elen: length of @ea (the message below is built for 6 bytes).
 * @laddr: logical address to assign.
 *
 * Sends an ASSIGN_LOGICAL_ADDRESS core message and waits up to 1 second
 * for TX completion.  On timeout or TX error the entire sequence is
 * retried (after a superframe-length wait) up to INIT_MX_RETRIES times.
 * Returns 0 on success or a negative error code.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
                                u8 elen, u8 laddr)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
        struct completion done;
        int timeout, ret, retries = 0;
        u32 *buf;
retry_laddr:
        init_completion(&done);
        mutex_lock(&dev->tx_lock);
        buf = msm_get_msg_buf(ctrl, 9);
        /* Word 0: header; destination field carries EA bytes 5 and 4 */
        buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
                                        SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
                                        SLIM_MSG_DEST_LOGICALADDR,
                                        ea[5] | ea[4] << 8);
        /* Word 1: remaining 4 enumeration-address bytes, little-endian pack */
        buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
        /* Word 2: the logical address being assigned */
        buf[2] = laddr;

        dev->wr_comp = &done;
        ret = msm_send_msg_buf(ctrl, buf, 9);
        timeout = wait_for_completion_timeout(&done, HZ);
        if (!timeout)
                dev->err = -ETIMEDOUT;
        if (dev->err) {
                /* Consume and clear the TX error; completion won't fire now */
                ret = dev->err;
                dev->err = 0;
                dev->wr_comp = NULL;
        }
        mutex_unlock(&dev->tx_lock);
        if (ret) {
                pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
                if (retries < INIT_MX_RETRIES) {
                        /* Wait at least one superframe before retrying */
                        msm_slim_wait_retry(dev);
                        retries++;
                        goto retry_laddr;
                } else {
                        pr_err("set LADDR failed after retrying:ret:%d", ret);
                }
        }
        return ret;
}
989
/*
 * msm_clk_pause_wakeup() - bring the bus out of clock-pause.
 * @ctrl: slimbus controller.
 *
 * Re-enables the controller interrupt and reference clock, then tells
 * the framer to resume the bus clock.  Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
        enable_irq(dev->irq);
        clk_prepare_enable(dev->rclk);
        writel_relaxed(1, dev->base + FRM_WAKEUP);
        /* Make sure framer wakeup write goes through before exiting function */
        mb();
        /*
         * Workaround: Currently, slave is reporting lost-sync messages
         * after slimbus comes out of clock pause.
         * Transaction with slave fail before slave reports that message
         * Give some time for that report to come
         * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
         * being 250 usecs, we wait for 20 superframes here to ensure
         * we get the message
         */
        usleep_range(5000, 5000);
        return 0;
}
1010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001011static int msm_config_port(struct slim_controller *ctrl, u8 pn)
1012{
1013 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
1014 struct msm_slim_endp *endpoint;
1015 int ret = 0;
1016 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
1017 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
1018 return -EPROTONOSUPPORT;
1019 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
1020 return -ENODEV;
1021
1022 endpoint = &dev->pipes[pn];
1023 ret = msm_slim_init_endpoint(dev, endpoint);
1024 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
1025 return ret;
1026}
1027
1028static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
1029 u8 pn, u8 **done_buf, u32 *done_len)
1030{
1031 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
1032 struct sps_iovec sio;
1033 int ret;
1034 if (done_len)
1035 *done_len = 0;
1036 if (done_buf)
1037 *done_buf = NULL;
1038 if (!dev->pipes[pn].connected)
1039 return SLIM_P_DISCONNECT;
1040 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
1041 if (!ret) {
1042 if (done_len)
1043 *done_len = sio.size;
1044 if (done_buf)
1045 *done_buf = (u8 *)sio.addr;
1046 }
1047 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
1048 return SLIM_P_INPROGRESS;
1049}
1050
1051static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
1052 u32 len, struct completion *comp)
1053{
1054 struct sps_register_event sreg;
1055 int ret;
1056 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -06001057 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001058 return -ENODEV;
1059
1060
1061 ctrl->ports[pn].xcomp = comp;
1062 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
1063 sreg.mode = SPS_TRIGGER_WAIT;
1064 sreg.xfer_done = comp;
1065 sreg.callback = NULL;
1066 sreg.user = &ctrl->ports[pn];
1067 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
1068 if (ret) {
1069 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
1070 return ret;
1071 }
1072 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
1073 SPS_IOVEC_FLAG_INT);
1074 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
1075
1076 return ret;
1077}
1078
/*
 * msm_sat_define_ch() - handle a satellite channel define/control request.
 * @sat: satellite context the request came from.
 * @buf: raw user message (offsets decoded per the satellite protocol —
 *       presumably matching the SLIMbus user-message format; confirm
 *       against the satellite-side implementation).
 * @len: message length in bytes.
 * @mc: message code (SLIM_USR_MC_CHAN_CTRL, _DEFINE_CHAN or _DEF_ACT_CHAN).
 *
 * For CHAN_CTRL, applies the requested operation to a known channel and
 * records pending remove/define counts for reconciliation at RECONFIG_NOW.
 * Otherwise, looks up or allocates channel handles for every channel number
 * in the message, decodes the channel properties from the payload bits, and
 * defines the channel (grouped when more than one channel is present),
 * activating it for DEF_ACT_CHAN.  Returns 0 or a negative error code.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
        struct msm_slim_ctrl *dev = sat->dev;
        enum slim_ch_control oper;
        int i;
        int ret = 0;
        if (mc == SLIM_USR_MC_CHAN_CTRL) {
                /* buf[5] is the first channel number of the request */
                for (i = 0; i < sat->nsatch; i++) {
                        if (buf[5] == sat->satch[i].chan)
                                break;
                }
                if (i >= sat->nsatch)
                        return -ENOTCONN;
                oper = ((buf[3] & 0xC0) >> 6);
                /* part of grp. activating/removing 1 will take care of rest */
                ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
                                        false);
                if (!ret) {
                        /* Count pending ops for each channel in the message */
                        for (i = 5; i < len; i++) {
                                int j;
                                for (j = 0; j < sat->nsatch; j++) {
                                        if (buf[i] == sat->satch[j].chan) {
                                                if (oper == SLIM_CH_REMOVE)
                                                        sat->satch[j].req_rem++;
                                                else
                                                        sat->satch[j].req_def++;
                                                break;
                                        }
                                }
                        }
                }
        } else {
                u16 chh[40];
                struct slim_ch prop;
                u32 exp;
                u8 coeff, cc;
                u8 prrate = buf[6];
                if (len <= 8)
                        return -EINVAL;
                /* Channel numbers start at buf[8]; map each to a handle */
                for (i = 8; i < len; i++) {
                        int j = 0;
                        for (j = 0; j < sat->nsatch; j++) {
                                if (sat->satch[j].chan == buf[i]) {
                                        chh[i - 8] = sat->satch[j].chanh;
                                        break;
                                }
                        }
                        if (j < sat->nsatch) {
                                /* Known channel: re-query to validate it */
                                u16 dummy;
                                ret = slim_query_ch(&sat->satcl, buf[i],
                                                        &dummy);
                                if (ret)
                                        return ret;
                                if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
                                        sat->satch[j].req_def++;
                                continue;
                        }
                        /* New channel: allocate a satellite-channel slot */
                        if (sat->nsatch >= MSM_MAX_SATCH)
                                return -EXFULL;
                        ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
                        if (ret)
                                return ret;
                        sat->satch[j].chan = buf[i];
                        sat->satch[j].chanh = chh[i - 8];
                        if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
                                sat->satch[j].req_def++;
                        sat->nsatch++;
                }
                /* Decode channel properties from the message bit-fields */
                prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
                prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
                prop.baser = SLIM_RATE_4000HZ;
                if (prrate & 0x8)
                        prop.baser = SLIM_RATE_11025HZ;
                else
                        prop.baser = SLIM_RATE_4000HZ;
                prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
                prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
                exp = (u32)((buf[5] & 0xF0) >> 4);
                coeff = (buf[4] & 0x20) >> 5;
                cc = (coeff ? 3 : 1);
                prop.ratem = cc * (1 << exp);
                /* i > 9 means more than one channel: define them as a group */
                if (i > 9)
                        ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
                                        true, &chh[0]);
                else
                        ret = slim_define_ch(&sat->satcl, &prop,
                                        &chh[0], 1, false, NULL);
                dev_dbg(dev->dev, "define sat grp returned:%d", ret);
                if (ret)
                        return ret;

                /* part of group so activating 1 will take care of rest */
                if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
                        ret = slim_control_ch(&sat->satcl,
                                        chh[0],
                                        SLIM_CH_ACTIVATE, false);
        }
        return ret;
}
1178
/*
 * msm_slim_rxwq() - dispatch one message from the controller's RX queue.
 * @dev: MSM slimbus controller.
 *
 * Dequeues a single received message and handles it by message code:
 * REPORT_PRESENT triggers logical-address assignment (and satellite
 * bring-up for satellite devices), REPLY_INFORMATION/REPLY_VALUE are
 * forwarded to the core as transaction responses, REPORT_INFORMATION is
 * logged, and anything else is dumped as an unexpected message.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
        u8 buf[40];
        u8 mc, mt, len;
        int i, ret;
        if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
                /* Header byte 0: 5-bit length, 3-bit message type */
                len = buf[0] & 0x1F;
                mt = (buf[0] >> 5) & 0x7;
                mc = buf[1];
                if (mt == SLIM_MSG_MT_CORE &&
                        mc == SLIM_MSG_MC_REPORT_PRESENT) {
                        u8 laddr;
                        u8 e_addr[6];
                        /* Enumeration address arrives in reverse byte order */
                        for (i = 0; i < 6; i++)
                                e_addr[i] = buf[7-i];

                        ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
                        /* Is this Qualcomm ported generic device? */
                        if (!ret && e_addr[5] == QC_MFGID_LSB &&
                                e_addr[4] == QC_MFGID_MSB &&
                                e_addr[1] == QC_DEVID_PGD &&
                                e_addr[2] != QC_CHIPID_SL)
                                dev->pgdla = laddr;
                        /* Enable runtime PM once the last local device shows */
                        if (!ret && !pm_runtime_enabled(dev->dev) &&
                                laddr == (QC_MSM_DEVS - 1))
                                pm_runtime_enable(dev->dev);

                        if (!ret && msm_is_sat_dev(e_addr)) {
                                struct msm_slim_sat *sat = addr_to_sat(dev,
                                                                laddr);
                                if (!sat)
                                        sat = msm_slim_alloc_sat(dev);
                                if (!sat)
                                        return;

                                /* Hand the message to the satellite worker */
                                sat->satcl.laddr = laddr;
                                msm_sat_enqueue(sat, (u32 *)buf, len);
                                queue_work(sat->wq, &sat->wd);
                        }
                        if (ret)
                                pr_err("assign laddr failed, error:%d", ret);
                } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
                                mc == SLIM_MSG_MC_REPLY_VALUE) {
                        u8 tid = buf[3];
                        dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
                        slim_msg_response(&dev->ctrl, &buf[4], tid,
                                                len - 4);
                        pm_runtime_mark_last_busy(dev->dev);
                } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
                        u8 l_addr = buf[2];
                        u16 ele = (u16)buf[4] << 4;
                        ele |= ((buf[3] & 0xf0) >> 4);
                        dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
                                l_addr, ele);
                        for (i = 0; i < len - 5; i++)
                                dev_err(dev->dev, "offset:0x%x:bit mask:%x",
                                                i, buf[i+5]);
                } else {
                        dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
                                        mc, mt);
                        for (i = 0; i < len; i++)
                                dev_err(dev->dev, "error msg: %x", buf[i]);

                }
        } else
                dev_err(dev->dev, "rxwq called and no dequeue");
}
1246
/*
 * slim_sat_rxprocess() - satellite work function: drain and service the
 * satellite's message queue.
 * @work: embedded work struct inside struct msm_slim_sat.
 *
 * Processes each queued satellite message: capability handshake on
 * REPORT_PRESENT (including channel cleanup after a subsystem restart),
 * address queries, channel define/control, reconfiguration, bandwidth
 * requests and port connect/disconnect.  Most requests are answered with
 * a GENERIC_ACK.  Runtime-PM votes (msm_slim_get_ctrl/put_ctrl) are taken
 * per message and must balance on every exit path of the loop body.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
        struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
        struct msm_slim_ctrl *dev = sat->dev;
        u8 buf[40];

        while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
                struct slim_msg_txn txn;
                u8 len, mc, mt;
                u32 bw_sl;
                int ret = 0;
                /* satv >= 0 means this iteration holds a runtime-PM vote */
                int satv = -1;
                bool gen_ack = false;
                u8 tid;
                u8 wbuf[8];
                int i, retries = 0;
                txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
                txn.dt = SLIM_MSG_DEST_LOGICALADDR;
                txn.ec = 0;
                txn.rbuf = NULL;
                txn.la = sat->satcl.laddr;
                /* satellite handling */
                len = buf[0] & 0x1F;
                mc = buf[1];
                mt = (buf[0] >> 5) & 0x7;

                if (mt == SLIM_MSG_MT_CORE &&
                        mc == SLIM_MSG_MC_REPORT_PRESENT) {
                        u8 e_addr[6];
                        /* Enumeration address is byte-reversed in the buffer */
                        for (i = 0; i < 6; i++)
                                e_addr[i] = buf[7-i];

                        if (pm_runtime_enabled(dev->dev)) {
                                satv = msm_slim_get_ctrl(dev);
                                if (satv >= 0)
                                        sat->pending_capability = true;
                        }
                        /*
                         * Since capability message is already sent, present
                         * message will indicate subsystem hosting this
                         * satellite has restarted.
                         * Remove all active channels of this satellite
                         * when this is detected
                         */
                        if (sat->sent_capability) {
                                for (i = 0; i < sat->nsatch; i++) {
                                        if (sat->satch[i].reconf) {
                                                pr_err("SSR, sat:%d, rm ch:%d",
                                                        sat->satcl.laddr,
                                                        sat->satch[i].chan);
                                                slim_control_ch(&sat->satcl,
                                                        sat->satch[i].chanh,
                                                        SLIM_CH_REMOVE, true);
                                                sat->satch[i].reconf = false;
                                        }
                                }
                        }
                } else if (mt != SLIM_MSG_MT_CORE &&
                                mc != SLIM_MSG_MC_REPORT_PRESENT) {
                        /* Vote for the controller while serving this request */
                        satv = msm_slim_get_ctrl(dev);
                }
                switch (mc) {
                case SLIM_MSG_MC_REPORT_PRESENT:
                        /* Remove runtime_pm vote once satellite acks */
                        if (mt != SLIM_MSG_MT_CORE) {
                                if (pm_runtime_enabled(dev->dev) &&
                                        sat->pending_capability) {
                                        msm_slim_put_ctrl(dev);
                                        sat->pending_capability = false;
                                }
                                continue;
                        }
                        /* send a Manager capability msg */
                        if (sat->sent_capability) {
                                if (mt == SLIM_MSG_MT_CORE)
                                        goto send_capability;
                                else
                                        continue;
                        }
                        ret = slim_add_device(&dev->ctrl, &sat->satcl);
                        if (ret) {
                                dev_err(dev->dev,
                                        "Satellite-init failed");
                                continue;
                        }
                        /* Satellite-channels */
                        sat->satch = kzalloc(MSM_MAX_SATCH *
                                        sizeof(struct msm_sat_chan),
                                        GFP_KERNEL);
send_capability:
                        txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
                        txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
                        txn.la = sat->satcl.laddr;
                        txn.rl = 8;
                        wbuf[0] = SAT_MAGIC_LSB;
                        wbuf[1] = SAT_MAGIC_MSB;
                        wbuf[2] = SAT_MSG_VER;
                        wbuf[3] = SAT_MSG_PROT;
                        txn.wbuf = wbuf;
                        txn.len = 4;
                        ret = msm_xfer_msg(&dev->ctrl, &txn);
                        if (ret) {
                                pr_err("capability for:0x%x fail:%d, retry:%d",
                                        sat->satcl.laddr, ret, retries);
                                if (retries < INIT_MX_RETRIES) {
                                        msm_slim_wait_retry(dev);
                                        retries++;
                                        goto send_capability;
                                } else {
                                        pr_err("failed after all retries:%d",
                                                        ret);
                                }
                        } else {
                                sat->sent_capability = true;
                        }
                        break;
                case SLIM_USR_MC_ADDR_QUERY:
                        /* Look up the logical address for the given EA */
                        memcpy(&wbuf[1], &buf[4], 6);
                        ret = slim_get_logical_addr(&sat->satcl,
                                        &wbuf[1], 6, &wbuf[7]);
                        if (ret)
                                memset(&wbuf[1], 0, 6);
                        wbuf[0] = buf[3];
                        txn.mc = SLIM_USR_MC_ADDR_REPLY;
                        txn.rl = 12;
                        txn.len = 8;
                        txn.wbuf = wbuf;
                        msm_xfer_msg(&dev->ctrl, &txn);
                        break;
                case SLIM_USR_MC_DEFINE_CHAN:
                case SLIM_USR_MC_DEF_ACT_CHAN:
                case SLIM_USR_MC_CHAN_CTRL:
                        /* Transaction ID position differs per message code */
                        if (mc != SLIM_USR_MC_CHAN_CTRL)
                                tid = buf[7];
                        else
                                tid = buf[4];
                        gen_ack = true;
                        ret = msm_sat_define_ch(sat, buf, len, mc);
                        if (ret) {
                                dev_err(dev->dev,
                                        "SAT define_ch returned:%d",
                                        ret);
                        }
                        /* Hold a PM vote until the matching RECONFIG_NOW */
                        if (!sat->pending_reconf) {
                                int chv = msm_slim_get_ctrl(dev);
                                if (chv >= 0)
                                        sat->pending_reconf = true;
                        }
                        break;
                case SLIM_USR_MC_RECONFIG_NOW:
                        tid = buf[3];
                        gen_ack = true;
                        ret = slim_reconfigure_now(&sat->satcl);
                        /* Reconcile pending channel defines/removes */
                        for (i = 0; i < sat->nsatch; i++) {
                                struct msm_sat_chan *sch = &sat->satch[i];
                                if (sch->req_rem && sch->reconf) {
                                        if (!ret) {
                                                slim_dealloc_ch(&sat->satcl,
                                                                sch->chanh);
                                                sch->reconf = false;
                                        }
                                        sch->req_rem--;
                                } else if (sch->req_def) {
                                        if (ret)
                                                slim_dealloc_ch(&sat->satcl,
                                                                sch->chanh);
                                        else
                                                sch->reconf = true;
                                        sch->req_def--;
                                }
                        }
                        if (sat->pending_reconf) {
                                msm_slim_put_ctrl(dev);
                                sat->pending_reconf = false;
                        }
                        break;
                case SLIM_USR_MC_REQ_BW:
                        /* what we get is in SLOTS */
                        bw_sl = (u32)buf[4] << 3 |
                                                ((buf[3] & 0xE0) >> 5);
                        sat->satcl.pending_msgsl = bw_sl;
                        tid = buf[5];
                        gen_ack = true;
                        break;
                case SLIM_USR_MC_CONNECT_SRC:
                case SLIM_USR_MC_CONNECT_SINK:
                        if (mc == SLIM_USR_MC_CONNECT_SRC)
                                txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
                        else
                                txn.mc = SLIM_MSG_MC_CONNECT_SINK;
                        wbuf[0] = buf[4] & 0x1F;
                        wbuf[1] = buf[5];
                        tid = buf[6];
                        txn.la = buf[3];
                        txn.mt = SLIM_MSG_MT_CORE;
                        txn.rl = 6;
                        txn.len = 2;
                        txn.wbuf = wbuf;
                        gen_ack = true;
                        ret = msm_xfer_msg(&dev->ctrl, &txn);
                        break;
                case SLIM_USR_MC_DISCONNECT_PORT:
                        txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
                        wbuf[0] = buf[4] & 0x1F;
                        tid = buf[5];
                        txn.la = buf[3];
                        txn.rl = 5;
                        txn.len = 1;
                        txn.mt = SLIM_MSG_MT_CORE;
                        txn.wbuf = wbuf;
                        gen_ack = true;
                        ret = msm_xfer_msg(&dev->ctrl, &txn);
                        /*
                         * NOTE(review): no break here; falls into the empty
                         * default case. Harmless today, but confirm it is
                         * intentional before adding cases below.
                         */
                default:
                        break;
                }
                if (!gen_ack) {
                        if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
                                msm_slim_put_ctrl(dev);
                        continue;
                }

                /* Send GENERIC_ACK: tid plus success/failure status */
                wbuf[0] = tid;
                if (!ret)
                        wbuf[1] = MSM_SAT_SUCCSS;
                else
                        wbuf[1] = 0;
                txn.mc = SLIM_USR_MC_GENERIC_ACK;
                txn.la = sat->satcl.laddr;
                txn.rl = 6;
                txn.len = 2;
                txn.wbuf = wbuf;
                txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
                msm_xfer_msg(&dev->ctrl, &txn);
                if (satv >= 0)
                        msm_slim_put_ctrl(dev);
        }
}
1484
Sagar Dharia790cfd02011-09-25 17:56:24 -06001485static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1486{
1487 struct msm_slim_sat *sat;
1488 char *name;
1489 if (dev->nsats >= MSM_MAX_NSATS)
1490 return NULL;
1491
1492 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1493 if (!sat) {
1494 dev_err(dev->dev, "no memory for satellite");
1495 return NULL;
1496 }
1497 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1498 if (!name) {
1499 dev_err(dev->dev, "no memory for satellite name");
1500 kfree(sat);
1501 return NULL;
1502 }
1503 dev->satd[dev->nsats] = sat;
1504 sat->dev = dev;
1505 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1506 sat->satcl.name = name;
1507 spin_lock_init(&sat->lock);
1508 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1509 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1510 if (!sat->wq) {
1511 kfree(name);
1512 kfree(sat);
1513 return NULL;
1514 }
1515 /*
1516 * Both sats will be allocated from RX thread and RX thread will
1517 * process messages sequentially. No synchronization necessary
1518 */
1519 dev->nsats++;
1520 return sat;
1521}
1522
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523static void
1524msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1525{
1526 u32 *buf = ev->data.transfer.user;
1527 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1528
1529 /*
1530 * Note the virtual address needs to be offset by the same index
1531 * as the physical address or just pass in the actual virtual address
1532 * if the sps_mem_buffer is not needed. Note that if completion is
1533 * used, the virtual address won't be available and will need to be
1534 * calculated based on the offset of the physical address
1535 */
1536 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1537
1538 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1539
1540 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1541 iovec->addr, iovec->size, iovec->flags);
1542
1543 } else {
1544 dev_err(dev->dev, "%s: unknown event %d\n",
1545 __func__, ev->event_id);
1546 }
1547}
1548
1549static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1550{
1551 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1552 msm_slim_rx_msgq_event(dev, notify);
1553}
1554
1555/* Queue up Rx message buffer */
1556static inline int
1557msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1558{
1559 int ret;
1560 u32 flags = SPS_IOVEC_FLAG_INT;
1561 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1562 struct sps_mem_buffer *mem = &endpoint->buf;
1563 struct sps_pipe *pipe = endpoint->sps;
1564
1565 /* Rx message queue buffers are 4 bytes in length */
1566 u8 *virt_addr = mem->base + (4 * ix);
1567 u32 phys_addr = mem->phys_base + (4 * ix);
1568
1569 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1570
1571 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1572 if (ret)
1573 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1574
1575 return ret;
1576}
1577
/*
 * msm_slim_rx_msgq_get() - read one completed 4-byte word from the RX
 * message queue.
 * @dev: MSM slimbus controller.
 * @data: destination array for the message words.
 * @offset: word index within @data to store into.
 *
 * Pops the next completed descriptor from the BAM pipe, copies its word
 * into data[offset], and immediately re-queues the buffer.  Returns 0 on
 * success or the sps_get_iovec() error.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;
        struct sps_iovec iovec;
        int index;
        int ret;

        ret = sps_get_iovec(pipe, &iovec);
        if (ret) {
                dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
                goto err_exit;
        }

        pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
                        iovec.addr, iovec.size, iovec.flags);
        /* The completed address must lie inside our DMA buffer */
        BUG_ON(iovec.addr < mem->phys_base);
        BUG_ON(iovec.addr >= mem->phys_base + mem->size);

        /* Calculate buffer index */
        index = (iovec.addr - mem->phys_base) / 4;
        *(data + offset) = *((u32 *)mem->base + index);

        pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

        /* Add buffer back to the queue */
        (void)msm_slim_post_rx_msgq(dev, index);

err_exit:
        return ret;
}
1611
/*
 * msm_slim_rx_msgq_thread() - kernel thread that reassembles RX messages.
 * @data: the msm_slim_ctrl this thread serves.
 *
 * Waits on rx_msgq_notify for each received 4-byte word.  When BAM
 * message queues are disabled, every notification means one complete
 * message and is handed straight to msm_slim_rxwq().  Otherwise, words
 * are accumulated in @buffer: the first word carries length/MT/MC (and
 * the source address for user-referred messages, used to pick a
 * satellite); once msg_len bytes have arrived the full message is routed
 * to the satellite worker or the generic RX path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
        struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
        struct completion *notify = &dev->rx_msgq_notify;
        struct msm_slim_sat *sat = NULL;
        u32 mc = 0;
        u32 mt = 0;
        u32 buffer[10];
        int index = 0;
        u8 msg_len = 0;
        int ret;

        dev_dbg(dev->dev, "rx thread started");

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                ret = wait_for_completion_interruptible(notify);

                if (ret)
                        dev_err(dev->dev, "rx thread wait error:%d", ret);

                /* 1 irq notification per message */
                if (!dev->use_rx_msgqs) {
                        msm_slim_rxwq(dev);
                        continue;
                }

                ret = msm_slim_rx_msgq_get(dev, buffer, index);
                if (ret) {
                        dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
                        continue;
                }

                pr_debug("message[%d] = 0x%x\n", index, *buffer);

                /* Decide if we use generic RX or satellite RX */
                if (index++ == 0) {
                        /* First word: 5-bit length, then MT and MC fields */
                        msg_len = *buffer & 0x1F;
                        pr_debug("Start of new message, len = %d\n", msg_len);
                        mt = (buffer[0] >> 5) & 0x7;
                        mc = (buffer[0] >> 8) & 0xff;
                        dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
                        if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
                                mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
                                u8 laddr;
                                laddr = (u8)((buffer[0] >> 16) & 0xff);
                                sat = addr_to_sat(dev, laddr);
                        }
                } else if ((index * 4) >= msg_len) {
                        /* Message complete: dispatch and reset reassembly */
                        index = 0;
                        if (sat) {
                                msm_sat_enqueue(sat, buffer, msg_len);
                                queue_work(sat->wq, &sat->wd);
                                sat = NULL;
                        } else {
                                msm_slim_rx_enqueue(dev, buffer, msg_len);
                                msm_slim_rxwq(dev);
                        }
                }
        }

        return 0;
}
1675
1676static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1677{
1678 int i, ret;
1679 u32 pipe_offset;
1680 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1681 struct sps_connect *config = &endpoint->config;
1682 struct sps_mem_buffer *descr = &config->desc;
1683 struct sps_mem_buffer *mem = &endpoint->buf;
1684 struct completion *notify = &dev->rx_msgq_notify;
1685
1686 struct sps_register_event sps_error_event; /* SPS_ERROR */
1687 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1688
Sagar Dharia31ac5812012-01-04 11:38:59 -07001689 init_completion(notify);
1690 if (!dev->use_rx_msgqs)
1691 goto rx_thread_create;
1692
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001693 /* Allocate the endpoint */
1694 ret = msm_slim_init_endpoint(dev, endpoint);
1695 if (ret) {
1696 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1697 goto sps_init_endpoint_failed;
1698 }
1699
1700 /* Get the pipe indices for the message queues */
1701 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1702 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1703
1704 config->mode = SPS_MODE_SRC;
1705 config->source = dev->bam.hdl;
1706 config->destination = SPS_DEV_HANDLE_MEM;
1707 config->src_pipe_index = pipe_offset;
1708 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1709 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1710
1711 /* Allocate memory for the FIFO descriptors */
1712 ret = msm_slim_sps_mem_alloc(dev, descr,
1713 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1714 if (ret) {
1715 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1716 goto alloc_descr_failed;
1717 }
1718
1719 ret = sps_connect(endpoint->sps, config);
1720 if (ret) {
1721 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1722 goto sps_connect_failed;
1723 }
1724
1725 /* Register completion for DESC_DONE */
1726 init_completion(notify);
1727 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1728
1729 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1730 sps_descr_event.options = SPS_O_DESC_DONE;
1731 sps_descr_event.user = (void *)dev;
1732 sps_descr_event.xfer_done = notify;
1733
1734 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1735 if (ret) {
1736 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1737 goto sps_reg_event_failed;
1738 }
1739
1740 /* Register callback for errors */
1741 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1742 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1743 sps_error_event.options = SPS_O_ERROR;
1744 sps_error_event.user = (void *)dev;
1745 sps_error_event.callback = msm_slim_rx_msgq_cb;
1746
1747 ret = sps_register_event(endpoint->sps, &sps_error_event);
1748 if (ret) {
1749 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1750 goto sps_reg_event_failed;
1751 }
1752
1753 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1754 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1755 if (ret) {
1756 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1757 goto alloc_buffer_failed;
1758 }
1759
1760 /*
1761 * Call transfer_one for each 4-byte buffer
1762 * Use (buf->size/4) - 1 for the number of buffer to post
1763 */
1764
1765 /* Setup the transfer */
1766 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1767 ret = msm_slim_post_rx_msgq(dev, i);
1768 if (ret) {
1769 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1770 goto sps_transfer_failed;
1771 }
1772 }
1773
Sagar Dharia31ac5812012-01-04 11:38:59 -07001774rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 /* Fire up the Rx message queue thread */
1776 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1777 MSM_SLIM_NAME "_rx_msgq_thread");
1778 if (!dev->rx_msgq_thread) {
1779 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001780 /* Tear-down BAMs or return? */
1781 if (!dev->use_rx_msgqs)
1782 return -EIO;
1783 else
1784 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 } else
1786 return 0;
1787
1788sps_transfer_failed:
1789 msm_slim_sps_mem_free(dev, mem);
1790alloc_buffer_failed:
1791 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1792 sps_register_event(endpoint->sps, &sps_error_event);
1793sps_reg_event_failed:
1794 sps_disconnect(endpoint->sps);
1795sps_connect_failed:
1796 msm_slim_sps_mem_free(dev, descr);
1797alloc_descr_failed:
1798 msm_slim_free_endpoint(endpoint);
1799sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001800 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 return ret;
1802}
1803
1804/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1805static int __devinit
1806msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1807{
1808 int i, ret;
1809 u32 bam_handle;
1810 struct sps_bam_props bam_props = {0};
1811
1812 static struct sps_bam_sec_config_props sec_props = {
1813 .ees = {
1814 [0] = { /* LPASS */
1815 .vmid = 0,
1816 .pipe_mask = 0xFFFF98,
1817 },
1818 [1] = { /* Krait Apps */
1819 .vmid = 1,
1820 .pipe_mask = 0x3F000007,
1821 },
1822 [2] = { /* Modem */
1823 .vmid = 2,
1824 .pipe_mask = 0x00000060,
1825 },
1826 },
1827 };
1828
1829 bam_props.ee = dev->ee;
1830 bam_props.virt_addr = dev->bam.base;
1831 bam_props.phys_addr = bam_mem->start;
1832 bam_props.irq = dev->bam.irq;
1833 bam_props.manage = SPS_BAM_MGR_LOCAL;
1834 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1835
1836 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1837 bam_props.p_sec_config_props = &sec_props;
1838
1839 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1840 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1841
1842 /* First 7 bits are for message Qs */
1843 for (i = 7; i < 32; i++) {
1844 /* Check what pipes are owned by Apps. */
1845 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1846 break;
1847 }
1848 dev->pipe_b = i - 7;
1849
1850 /* Register the BAM device with the SPS driver */
1851 ret = sps_register_bam_device(&bam_props, &bam_handle);
1852 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001853 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1854 dev->use_rx_msgqs = 0;
1855 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001856 }
1857 dev->bam.hdl = bam_handle;
1858 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1859
Sagar Dharia31ac5812012-01-04 11:38:59 -07001860init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001861 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001862 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001863 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001864 if (ret && bam_handle) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001865 sps_deregister_bam_device(bam_handle);
1866 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001868 return ret;
1869}
1870
/*
 * Tears down the RX message-queue endpoint and SPS/BAM resources acquired
 * by msm_slim_sps_init(), mirroring the init sequence in reverse: free the
 * data buffer, clear the registered event callback, break the SPS
 * connection, free the descriptor FIFO, free the endpoint and finally
 * deregister the BAM device.
 *
 * NOTE(review): when use_rx_msgqs is 0 (e.g. "qcom,rxreg-access" DT mode)
 * the BAM device that msm_slim_sps_init() may still have registered is
 * never deregistered here — confirm that is intentional.
 */
static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		/* A zeroed event struct unregisters the previous callback */
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
		sps_deregister_bam_device(dev->bam.hdl);
	}
}
1888
Sagar Dhariacc969452011-09-19 10:34:30 -06001889static void msm_slim_prg_slew(struct platform_device *pdev,
1890 struct msm_slim_ctrl *dev)
1891{
1892 struct resource *slew_io;
1893 void __iomem *slew_reg;
1894 /* SLEW RATE register for this slimbus */
1895 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1896 "slimbus_slew_reg");
1897 if (!dev->slew_mem) {
1898 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1899 return;
1900 }
1901 slew_io = request_mem_region(dev->slew_mem->start,
1902 resource_size(dev->slew_mem), pdev->name);
1903 if (!slew_io) {
1904 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1905 dev->slew_mem = NULL;
1906 return;
1907 }
1908
1909 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1910 if (!slew_reg) {
1911 dev_dbg(dev->dev, "slew register mapping failed");
1912 release_mem_region(dev->slew_mem->start,
1913 resource_size(dev->slew_mem));
1914 dev->slew_mem = NULL;
1915 return;
1916 }
1917 writel_relaxed(1, slew_reg);
1918 /* Make sure slimbus-slew rate enabling goes through */
1919 wmb();
1920 iounmap(slew_reg);
1921}
1922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001923static int __devinit msm_slim_probe(struct platform_device *pdev)
1924{
1925 struct msm_slim_ctrl *dev;
1926 int ret;
1927 struct resource *bam_mem, *bam_io;
1928 struct resource *slim_mem, *slim_io;
1929 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001930 bool rxreg_access = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001931 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1932 "slimbus_physical");
1933 if (!slim_mem) {
1934 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1935 return -ENODEV;
1936 }
1937 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1938 pdev->name);
1939 if (!slim_io) {
1940 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1941 return -EBUSY;
1942 }
1943
1944 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1945 "slimbus_bam_physical");
1946 if (!bam_mem) {
1947 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1948 ret = -ENODEV;
1949 goto err_get_res_bam_failed;
1950 }
1951 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1952 pdev->name);
1953 if (!bam_io) {
1954 release_mem_region(slim_mem->start, resource_size(slim_mem));
1955 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1956 ret = -EBUSY;
1957 goto err_get_res_bam_failed;
1958 }
1959 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1960 "slimbus_irq");
1961 if (!irq) {
1962 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1963 ret = -ENODEV;
1964 goto err_get_res_failed;
1965 }
1966 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1967 "slimbus_bam_irq");
1968 if (!bam_irq) {
1969 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1970 ret = -ENODEV;
1971 goto err_get_res_failed;
1972 }
1973
1974 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1975 if (!dev) {
1976 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1977 ret = -ENOMEM;
1978 goto err_get_res_failed;
1979 }
1980 dev->dev = &pdev->dev;
1981 platform_set_drvdata(pdev, dev);
1982 slim_set_ctrldata(&dev->ctrl, dev);
1983 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1984 if (!dev->base) {
1985 dev_err(&pdev->dev, "IOremap failed\n");
1986 ret = -ENOMEM;
1987 goto err_ioremap_failed;
1988 }
1989 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1990 if (!dev->bam.base) {
1991 dev_err(&pdev->dev, "BAM IOremap failed\n");
1992 ret = -ENOMEM;
1993 goto err_ioremap_bam_failed;
1994 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001995 if (pdev->dev.of_node) {
1996
1997 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1998 &dev->ctrl.nr);
1999 if (ret) {
2000 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
2001 goto err_of_init_failed;
2002 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06002003 rxreg_access = of_property_read_bool(pdev->dev.of_node,
2004 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002005 /* Optional properties */
2006 ret = of_property_read_u32(pdev->dev.of_node,
2007 "qcom,min-clk-gear", &dev->ctrl.min_cg);
2008 ret = of_property_read_u32(pdev->dev.of_node,
2009 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06002010 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
2011 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002012 } else {
2013 dev->ctrl.nr = pdev->id;
2014 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015 dev->ctrl.nchans = MSM_SLIM_NCHANS;
2016 dev->ctrl.nports = MSM_SLIM_NPORTS;
2017 dev->ctrl.set_laddr = msm_set_laddr;
2018 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06002019 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002020 dev->ctrl.config_port = msm_config_port;
2021 dev->ctrl.port_xfer = msm_slim_port_xfer;
2022 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
2023 /* Reserve some messaging BW for satellite-apps driver communication */
2024 dev->ctrl.sched.pending_msgsl = 30;
2025
2026 init_completion(&dev->reconf);
2027 mutex_init(&dev->tx_lock);
2028 spin_lock_init(&dev->rx_lock);
2029 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06002030 if (rxreg_access)
2031 dev->use_rx_msgqs = 0;
2032 else
2033 dev->use_rx_msgqs = 1;
2034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035 dev->irq = irq->start;
2036 dev->bam.irq = bam_irq->start;
2037
2038 ret = msm_slim_sps_init(dev, bam_mem);
2039 if (ret != 0) {
2040 dev_err(dev->dev, "error SPS init\n");
2041 goto err_sps_init_failed;
2042 }
2043
2044
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
2046 dev->framer.superfreq =
2047 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
2048 dev->ctrl.a_framer = &dev->framer;
2049 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002050 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002051 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052
2053 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
2054 "msm_slim_irq", dev);
2055 if (ret) {
2056 dev_err(&pdev->dev, "request IRQ failed\n");
2057 goto err_request_irq_failed;
2058 }
2059
Sagar Dhariacc969452011-09-19 10:34:30 -06002060 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002061
2062 /* Register with framework before enabling frame, clock */
2063 ret = slim_add_numbered_controller(&dev->ctrl);
2064 if (ret) {
2065 dev_err(dev->dev, "error adding controller\n");
2066 goto err_ctrl_failed;
2067 }
2068
2069
Tianyi Gou44a81b02012-02-06 17:49:07 -08002070 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002071 if (!dev->rclk) {
2072 dev_err(dev->dev, "slimbus clock not found");
2073 goto err_clk_get_failed;
2074 }
Sagar Dhariacc969452011-09-19 10:34:30 -06002075 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002076 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002077
Sagar Dharia82e516f2012-03-16 16:01:23 -06002078 dev->ver = readl_relaxed(dev->base);
2079 /* Version info in 16 MSbits */
2080 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002082 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002083 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002084 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085
2086 /*
2087 * Manager register initialization
2088 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2089 */
2090 if (dev->use_rx_msgqs)
2091 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2092 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2093 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2094 else
2095 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2096 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2097 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2098 writel_relaxed(1, dev->base + MGR_CFG);
2099 /*
2100 * Framer registers are beyond 1K memory region after Manager and/or
2101 * component registers. Make sure those writes are ordered
2102 * before framer register writes
2103 */
2104 wmb();
2105
2106 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002107 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
2108 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
2109 dev->base + FRM_CFG);
2110 /*
2111 * Make sure that framer wake-up and enabling writes go through
2112 * before any other component is enabled. Framer is responsible for
2113 * clocking the bus and enabling framer first will ensure that other
2114 * devices can report presence when they are enabled
2115 */
2116 mb();
2117
2118 /* Enable RX msg Q */
2119 if (dev->use_rx_msgqs)
2120 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2121 dev->base + MGR_CFG);
2122 else
2123 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2124 /*
2125 * Make sure that manager-enable is written through before interface
2126 * device is enabled
2127 */
2128 mb();
2129 writel_relaxed(1, dev->base + INTF_CFG);
2130 /*
2131 * Make sure that interface-enable is written through before enabling
2132 * ported generic device inside MSM manager
2133 */
2134 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002135 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2136 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2137 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002138 /*
2139 * Make sure that ported generic device is enabled and port-EE settings
2140 * are written through before finally enabling the component
2141 */
2142 mb();
2143
Sagar Dharia82e516f2012-03-16 16:01:23 -06002144 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002145 /*
2146 * Make sure that all writes have gone through before exiting this
2147 * function
2148 */
2149 mb();
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002150 if (pdev->dev.of_node)
2151 of_register_slim_devices(&dev->ctrl);
2152
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002153 pm_runtime_use_autosuspend(&pdev->dev);
2154 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2155 pm_runtime_set_active(&pdev->dev);
2156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2158 return 0;
2159
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002160err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002161 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002162err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002163 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002164err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 msm_slim_sps_exit(dev);
2166err_sps_init_failed:
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002167err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002168 iounmap(dev->bam.base);
2169err_ioremap_bam_failed:
2170 iounmap(dev->base);
2171err_ioremap_failed:
2172 kfree(dev);
2173err_get_res_failed:
2174 release_mem_region(bam_mem->start, resource_size(bam_mem));
2175err_get_res_bam_failed:
2176 release_mem_region(slim_mem->start, resource_size(slim_mem));
2177 return ret;
2178}
2179
/*
 * Remove path: tear down satellite devices first, then disable runtime PM,
 * free the IRQ, unregister the controller and release clock, SPS/BAM,
 * mappings and memory regions in roughly the reverse order of probe.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Saved before kfree(dev) below invalidates dev->slew_mem */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Release every satellite's channels, device and workqueue */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	/* Regions were claimed by name in probe; release them the same way */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2220
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002221#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue a delayed autosuspend rather than
 * suspending synchronously, and return -EAGAIN so the PM core does not
 * proceed with an immediate suspend from this path.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
2228#endif
2229
2230/*
2231 * If PM_RUNTIME is not defined, these 2 functions become helper
2232 * functions to be called from system suspend/resume. So they are not
2233 * inside ifdef CONFIG_PM_RUNTIME
2234 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002235#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002236static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002237{
2238 struct platform_device *pdev = to_platform_device(device);
2239 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002240 int ret;
2241 dev_dbg(device, "pm_runtime: suspending...\n");
2242 dev->state = MSM_CTRL_SLEEPING;
2243 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002244 if (ret) {
2245 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002246 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002247 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002248 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002249 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002250 return ret;
2251}
2252
2253static int msm_slim_runtime_resume(struct device *device)
2254{
2255 struct platform_device *pdev = to_platform_device(device);
2256 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2257 int ret = 0;
2258 dev_dbg(device, "pm_runtime: resuming...\n");
2259 if (dev->state == MSM_CTRL_ASLEEP)
2260 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002261 if (ret) {
2262 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002263 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002264 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002265 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002266 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002267 return ret;
2268}
2269
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002270static int msm_slim_suspend(struct device *dev)
2271{
2272 int ret = 0;
2273 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2274 dev_dbg(dev, "system suspend");
2275 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06002276 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002277 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002278 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002279 * If the clock pause failed due to active channels, there is
2280 * a possibility that some audio stream is active during suspend
2281 * We dont want to return suspend failure in that case so that
2282 * display and relevant components can still go to suspend.
2283 * If there is some other error, then it should be passed-on
2284 * to system level suspend
2285 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002286 ret = 0;
2287 }
2288 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289}
2290
/*
 * System-sleep resume: delegate to the runtime-PM resume helper unless
 * runtime PM still owns the (suspended) device, then re-arm autosuspend.
 */
static int msm_slim_resume(struct device *dev)
{
	int ret;

	/* If runtime_pm is enabled, this resume shouldn't do anything */
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	ret = msm_slim_runtime_resume(dev);
	if (ret == 0) {
		/* Back awake: restart the autosuspend countdown */
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return ret;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002307#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002308
/*
 * PM callbacks: system sleep goes through msm_slim_suspend/resume (which
 * defer to the runtime-PM handlers when runtime PM has not already acted);
 * runtime PM uses the clock-pause based handlers with autosuspend idling.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2320
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002321static struct of_device_id msm_slim_dt_match[] = {
2322 {
2323 .compatible = "qcom,slim-msm",
2324 },
2325 {}
2326};
2327
/* Platform-driver glue: probe/remove, PM ops and DT match table */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2338
/*
 * Registered at subsys_initcall level (rather than module_init) so the
 * bus controller is available before dependent device drivers probe.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
2344
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
2350
2351MODULE_LICENSE("GPL v2");
2352MODULE_VERSION("0.1");
2353MODULE_DESCRIPTION("MSM Slimbus controller");
2354MODULE_ALIAS("platform:msm-slim");