blob: b7081333e0742a61f34038befa016c9ef581b703 [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
28
/* Per spec. max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

/* User-defined message codes carried in *_REFERRED_USER transactions */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
/* Runtime-PM autosuspend delay for the controller device */
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM		32

/* Assemble the first word of a TX message:
 * length | msg-type | msg-code | dest-type | address
 */
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
#define SLIM_ROOT_FREQ	24576000

/* Depth of the manager-side and satellite-side RX message rings */
#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
/* Default hardware port configuration bits */
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

/* Satellite protocol constants */
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCSS	0x20
#define MSM_MAX_NSATS	2
#define MSM_MAX_SATCH	32

/* Qualcomm enumeration-address components used to classify devices */
#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
#define QC_MSM_DEVS	5
#define INIT_MX_RETRIES	10
#define DEF_RETRY_MS	10

/* Select V1 vs V2 register layout at runtime from dev->ver ('v') */
#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))

/* V2 layout: per-EE blocks stride 0x1000; per-port blocks stride 0x1000 */
#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
#define CFG_PORT_V2(r) ((r ## _V2))
/* Component registers (V2 controller register map) */
enum comp_reg_v2 {
	COMP_CFG_V2 = 4,
	COMP_TRUST_CFG_V2 = 0x3000,
};

/* Manager PGD registers (V2 controller register map) */
enum pgd_reg_v2 {
	PGD_CFG_V2 = 0x800,
	PGD_STAT_V2 = 0x804,
	PGD_INT_EN_V2 = 0x810,
	PGD_INT_STAT_V2 = 0x814,
	PGD_INT_CLR_V2 = 0x818,
	PGD_OWN_EEn_V2 = 0x300C,
	PGD_PORT_INT_EN_EEn_V2 = 0x5000,
	PGD_PORT_INT_ST_EEn_V2 = 0x5004,
	PGD_PORT_INT_CL_EEn_V2 = 0x5008,
	PGD_PORT_CFGn_V2 = 0x14000,
	PGD_PORT_STATn_V2 = 0x14004,
	PGD_PORT_PARAMn_V2 = 0x14008,
	PGD_PORT_BLKn_V2 = 0x1400C,
	PGD_PORT_TRANn_V2 = 0x14010,
	PGD_PORT_MCHANn_V2 = 0x14014,
	PGD_PORT_PSHPLLn_V2 = 0x14018,
	PGD_PORT_PC_CFGn_V2 = 0x8000,
	PGD_PORT_PC_VALn_V2 = 0x8004,
	PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
	PGD_PORT_PC_VFR_STn_V2 = 0x800C,
	PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
	PGD_IE_STAT_V2 = 0x820,
	PGD_VE_STAT_V2 = 0x830,
};
132
/* V1 layout: per-EE blocks stride 16 bytes; per-port blocks stride 32 bytes */
#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
#define CFG_PORT_V1(r) ((r ## _V1))
/* Component registers (V1 controller register map) */
enum comp_reg_v1 {
	COMP_CFG_V1 = 0,
	COMP_TRUST_CFG_V1 = 0x14,
};

/* Manager PGD registers (V1 controller register map) */
enum pgd_reg_v1 {
	PGD_CFG_V1 = 0x1000,
	PGD_STAT_V1 = 0x1004,
	PGD_INT_EN_V1 = 0x1010,
	PGD_INT_STAT_V1 = 0x1014,
	PGD_INT_CLR_V1 = 0x1018,
	PGD_OWN_EEn_V1 = 0x1020,
	PGD_PORT_INT_EN_EEn_V1 = 0x1030,
	PGD_PORT_INT_ST_EEn_V1 = 0x1034,
	PGD_PORT_INT_CL_EEn_V1 = 0x1038,
	PGD_PORT_CFGn_V1 = 0x1080,
	PGD_PORT_STATn_V1 = 0x1084,
	PGD_PORT_PARAMn_V1 = 0x1088,
	PGD_PORT_BLKn_V1 = 0x108C,
	PGD_PORT_TRANn_V1 = 0x1090,
	PGD_PORT_MCHANn_V1 = 0x1094,
	PGD_PORT_PSHPLLn_V1 = 0x1098,
	PGD_PORT_PC_CFGn_V1 = 0x1600,
	PGD_PORT_PC_VALn_V1 = 0x1604,
	PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
	PGD_PORT_PC_VFR_STn_V1 = 0x160C,
	PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
	PGD_IE_STAT_V1 = 0x1700,
	PGD_VE_STAT_V1 = 0x1710,
};
168
/* Manager registers */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,
	MGR_INT_CLR = 0x218,
	MGR_TX_MSG = 0x230,
	MGR_RX_MSG = 0x270,
	MGR_IE_STAT = 0x2F0,
	MGR_VE_STAT = 0x300,
};

/* Bits for MGR_CFG */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer registers */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};

/* Interface registers */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};

/* Resource-group ownership encodings (written to PGD_OWN_EEn) */
enum rsc_grp {
	EE_MGR_RSC_GRP = 1 << 10,
	EE_NGD_2 = 2 << 6,
	EE_NGD_1 = 0,
};

/* Bits of MGR_INT_STAT / MGR_INT_EN / MGR_INT_CLR */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	MGR_INT_TX_MSG_SENT = 1 << 31,
};

/* Bit positions within FRM_CFG */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
	INTR_WAKE = 19,
};

/* Power-management state of the controller */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
/* BAM (SPS) device context used for the data pipes and RX message queue */
struct msm_slim_sps_bam {
	u32 hdl;		/* handle used with the sps_* API */
	void __iomem *base;	/* BAM register space */
	int irq;
};

/* One SPS pipe endpoint (data port pipe or HW message queue pipe) */
struct msm_slim_endp {
	struct sps_pipe *sps;
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;
	struct completion *xcomp;
	bool connected;
};

/* Per-controller driver state */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* generic slimbus controller core */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* manager register space */
	struct resource *slew_mem;	/* extra reg resource; presumably slew
					 * rate control — confirm at probe */
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single staging buffer for TX msgs */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN]; /* RX ring storage */
	spinlock_t rx_lock;		/* protects head/tail/rx_msgs */
	int head;			/* RX ring consumer index */
	int tail;			/* RX ring producer index */
	int irq;
	int err;			/* last TX error, set from the ISR */
	int ee;				/* this execution environment's index */
	struct completion *wr_comp;	/* completion for the in-flight TX */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];
	struct msm_slim_endp pipes[7];
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;	/* HW RX message-queue endpoint */
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;
	struct clk *hclk;
	struct mutex tx_lock;		/* serializes TX transactions */
	u8 pgdla;			/* logical address of the PGD */
	bool use_rx_msgqs;
	int pipe_b;			/* HW port number of pipes[0] */
	struct completion reconf;	/* signalled on RECONFIG_DONE intr */
	bool reconf_busy;
	bool chan_active;		/* data-channel runtime-PM vote held */
	enum msm_ctrl_state state;
	int nsats;			/* number of satellites present */
	u32 ver;			/* HW version; selects V1/V2 reg map */
};

/* Book-keeping for one channel managed on behalf of a satellite */
struct msm_sat_chan {
	u8 chan;
	u16 chanh;	/* channel handle from the slimbus core */
	int req_rem;	/* outstanding remove requests */
	int req_def;	/* outstanding define requests */
	bool reconf;
};

/* Per-satellite device context */
struct msm_slim_sat {
	struct slim_device satcl;
	struct msm_slim_ctrl *dev;
	struct workqueue_struct *wq;
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40];	/* msg ring filled by the ISR */
	struct msm_sat_chan *satch;
	u8 nsatch;
	bool sent_capability;
	bool pending_reconf;
	bool pending_capability;
	int shead;		/* ring consumer index */
	int stail;		/* ring producer index */
	spinlock_t lock;	/* protects shead/stail/sat_msgs */
};

static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700325static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
326{
327 spin_lock(&dev->rx_lock);
328 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
329 spin_unlock(&dev->rx_lock);
330 dev_err(dev->dev, "RX QUEUE full!");
331 return -EXFULL;
332 }
333 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
334 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
335 spin_unlock(&dev->rx_lock);
336 return 0;
337}
338
339static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
340{
341 unsigned long flags;
342 spin_lock_irqsave(&dev->rx_lock, flags);
343 if (dev->tail == dev->head) {
344 spin_unlock_irqrestore(&dev->rx_lock, flags);
345 return -ENODATA;
346 }
347 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
348 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
349 spin_unlock_irqrestore(&dev->rx_lock, flags);
350 return 0;
351}
352
353static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
354{
355 struct msm_slim_ctrl *dev = sat->dev;
356 spin_lock(&sat->lock);
357 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
358 spin_unlock(&sat->lock);
359 dev_err(dev->dev, "SAT QUEUE full!");
360 return -EXFULL;
361 }
362 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
363 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
364 spin_unlock(&sat->lock);
365 return 0;
366}
367
368static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
369{
370 unsigned long flags;
371 spin_lock_irqsave(&sat->lock, flags);
372 if (sat->stail == sat->shead) {
373 spin_unlock_irqrestore(&sat->lock, flags);
374 return -ENODATA;
375 }
376 memcpy(buf, sat->sat_msgs[sat->shead], 40);
377 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
378 spin_unlock_irqrestore(&sat->lock, flags);
379 return 0;
380}
381
382static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
383{
384 e_addr[0] = (buffer[1] >> 24) & 0xff;
385 e_addr[1] = (buffer[1] >> 16) & 0xff;
386 e_addr[2] = (buffer[1] >> 8) & 0xff;
387 e_addr[3] = buffer[1] & 0xff;
388 e_addr[4] = (buffer[0] >> 24) & 0xff;
389 e_addr[5] = (buffer[0] >> 16) & 0xff;
390}
391
392static bool msm_is_sat_dev(u8 *e_addr)
393{
394 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
395 e_addr[2] != QC_CHIPID_SL &&
396 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
397 return true;
398 return false;
399}
400
/*
 * Take a runtime-PM reference ("vote") on the controller device.
 * Returns the pm_runtime_get_sync() result (>= 0 on success), or
 * -ENODEV if the usage count looks corrupted or runtime PM is not
 * compiled in.
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		/* Sanity check: the get above must leave a positive count */
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM reference taken by msm_slim_get_ctrl(). Marks the
 * device busy first so autosuspend restarts its delay, and refuses to
 * put when the usage count is already non-positive (vote imbalance).
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
430
Sagar Dharia790cfd02011-09-25 17:56:24 -0600431static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
432{
433 struct msm_slim_sat *sat = NULL;
434 int i = 0;
435 while (!sat && i < dev->nsats) {
436 if (laddr == dev->satd[i]->satcl.laddr)
437 sat = dev->satd[i];
438 i++;
439 }
440 return sat;
441}
442
/*
 * Manager interrupt handler. Services, in order:
 *  - TX done / TX NACK (dumping controller state on a NACK),
 *  - received messages (dispatched to satellites, the RX ring, or logged),
 *  - reconfiguration-done,
 *  - per-port status interrupts (disconnect/overflow/underflow).
 * Every interrupt clear is followed by mb() so the write reaches the
 * hardware before completions are signalled or work is queued.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* TX was NACKed: capture MGR/FRM/INTF state for the
			 * error dump before clearing the interrupt.
			 */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);

			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);

			/* Reported to the waiting sender via dev->err */
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word holds length (bytes), msg-type and msg-code */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User messages go to the satellite that owns the
			 * source logical address; its workqueue consumes them.
			 */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Decode and log the reported information element */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupts: record error type on the affected port */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
623
624static int
625msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
626{
627 int ret;
628 struct sps_pipe *endpoint;
629 struct sps_connect *config = &ep->config;
630
631 /* Allocate the endpoint */
632 endpoint = sps_alloc_endpoint();
633 if (!endpoint) {
634 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
635 return -ENOMEM;
636 }
637
638 /* Get default connection configuration for an endpoint */
639 ret = sps_get_config(endpoint, config);
640 if (ret) {
641 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
642 goto sps_config_failed;
643 }
644
645 ep->sps = endpoint;
646 return 0;
647
648sps_config_failed:
649 sps_free_endpoint(endpoint);
650 return ret;
651}
652
653static void
654msm_slim_free_endpoint(struct msm_slim_endp *ep)
655{
656 sps_free_endpoint(ep->sps);
657 ep->sps = NULL;
658}
659
660static int msm_slim_sps_mem_alloc(
661 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
662{
663 dma_addr_t phys;
664
665 mem->size = len;
666 mem->min_size = 0;
667 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
668
669 if (!mem->base) {
670 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
671 return -ENOMEM;
672 }
673
674 mem->phys_base = phys;
675 memset(mem->base, 0x00, mem->size);
676 return 0;
677}
678
679static void
680msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
681{
682 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
683 mem->size = 0;
684 mem->base = NULL;
685 mem->phys_base = 0;
686}
687
/*
 * Program hardware port 'pn' with the default watermark/alignment/packing
 * configuration, enable it, and unmask its interrupt for this EE.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	/* Read-modify-write: add this port to the interrupt-enable mask */
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
				dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
701
/*
 * Connect SPS pipe 'pn' to its BAM port. Direction is taken from the
 * slimbus core's flow setting: SLIM_SRC means memory -> BAM (we are the
 * data source), otherwise BAM -> memory. The BAM pipe index is read out
 * of the port status register. On success the port hardware is
 * programmed via msm_hw_set_port(). Returns 0 or an sps_* error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
			SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe only needs its options refreshed */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* Port status carries the BAM pipe index in bits [11:4] */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
753
754static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
755{
756 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
757 /*
758 * Currently we block a transaction until the current one completes.
759 * In case we need multiple transactions, use message Q
760 */
761 return dev->tx_buf;
762}
763
764static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
765{
766 int i;
767 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
768 for (i = 0; i < (len + 3) >> 2; i++) {
769 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
770 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
771 }
772 /* Guarantee that message is sent before returning */
773 mb();
774 return 0;
775}
776
/*
 * Send one slimbus transaction and wait (up to 1s) for its completion.
 *
 * Responsibilities beyond the raw TX:
 *  - runtime-PM voting: a messaging vote ('msgv') for every non-clock-pause
 *    message, plus a data-channel vote while channels are scheduled;
 *  - assembling the message into the TX staging buffer (header word, TID,
 *    element code, payload);
 *  - intercepting CONNECT_SOURCE/SINK/DISCONNECT_PORT addressed to the PGD
 *    (la == 0xFF) to set up / tear down the local SPS pipe;
 *  - the clock-pause RECONFIGURE_NOW sequence: on success the root clock
 *    is ungated last by resume, so it is disabled here with the IRQ.
 *
 * Returns dev->err from the ISR on completion, -EBUSY when suspended,
 * -EPROTONOSUPPORT for enumeration-address destinations, -ETIMEDOUT on
 * timeout, or a pipe-connect error.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Refuse non-wakeup traffic while runtime/system suspended */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Wait out any reconfiguration still in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* Port connect/disconnect with la == 0xFF targets our own PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts after the 3-byte (LA) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Information/value-element transactions carry a 2-byte EC */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate the SW port number to the HW port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);
	/* Clear wr_comp so a late ISR cannot complete a dead stack var */
	if (!timeout)
		dev->wr_comp = NULL;
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* Clock-pause variant: wait for reconfig-done, then gate
		 * the root clock and IRQ while the bus is paused.
		 */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* Last channel removed: drop the data-channel vote */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	if (!timeout) {
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x",
				txn->mc, txn->mt);
		dev->wr_comp = NULL;
	}

	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	return timeout ? dev->err : -ETIMEDOUT;
}
936
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600937static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
938{
939 int msec_per_frm = 0;
940 int sfr_per_sec;
941 /* Wait for 1 superframe, or default time and then retry */
942 sfr_per_sec = dev->framer.superfreq /
943 (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
944 if (sfr_per_sec)
945 msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
946 if (msec_per_frm < DEF_RETRY_MS)
947 msec_per_frm = DEF_RETRY_MS;
948 msleep(msec_per_frm);
949}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700950static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
951 u8 elen, u8 laddr)
952{
953 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600954 struct completion done;
955 int timeout, ret, retries = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956 u32 *buf;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600957retry_laddr:
958 init_completion(&done);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700959 mutex_lock(&dev->tx_lock);
960 buf = msm_get_msg_buf(ctrl, 9);
961 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
962 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
963 SLIM_MSG_DEST_LOGICALADDR,
964 ea[5] | ea[4] << 8);
965 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
966 buf[2] = laddr;
967
968 dev->wr_comp = &done;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600969 ret = msm_send_msg_buf(ctrl, buf, 9);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700970 timeout = wait_for_completion_timeout(&done, HZ);
ehgrace.kim1f6cbba2012-08-03 16:05:34 -0700971 if (!timeout)
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600972 dev->err = -ETIMEDOUT;
973 if (dev->err) {
974 ret = dev->err;
975 dev->err = 0;
ehgrace.kim1f6cbba2012-08-03 16:05:34 -0700976 dev->wr_comp = NULL;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600977 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700978 mutex_unlock(&dev->tx_lock);
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600979 if (ret) {
980 pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
981 if (retries < INIT_MX_RETRIES) {
982 msm_slim_wait_retry(dev);
983 retries++;
984 goto retry_laddr;
985 } else {
986 pr_err("set LADDR failed after retrying:ret:%d", ret);
987 }
988 }
989 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700990}
991
/*
 * msm_clk_pause_wakeup() - bring the bus out of SLIMbus clock pause.
 * @ctrl: SLIMbus controller to wake up.
 *
 * Re-enables the controller IRQ (disabled when clock pause was entered),
 * turns the root clock back on, and pokes the framer wakeup register.
 * Statement order matters here: IRQ and clock must be live before the
 * framer wakeup write is issued.  Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
1012
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001013static int msm_config_port(struct slim_controller *ctrl, u8 pn)
1014{
1015 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
1016 struct msm_slim_endp *endpoint;
1017 int ret = 0;
1018 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
1019 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
1020 return -EPROTONOSUPPORT;
1021 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
1022 return -ENODEV;
1023
1024 endpoint = &dev->pipes[pn];
1025 ret = msm_slim_init_endpoint(dev, endpoint);
1026 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
1027 return ret;
1028}
1029
1030static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
1031 u8 pn, u8 **done_buf, u32 *done_len)
1032{
1033 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
1034 struct sps_iovec sio;
1035 int ret;
1036 if (done_len)
1037 *done_len = 0;
1038 if (done_buf)
1039 *done_buf = NULL;
1040 if (!dev->pipes[pn].connected)
1041 return SLIM_P_DISCONNECT;
1042 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
1043 if (!ret) {
1044 if (done_len)
1045 *done_len = sio.size;
1046 if (done_buf)
1047 *done_buf = (u8 *)sio.addr;
1048 }
1049 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
1050 return SLIM_P_INPROGRESS;
1051}
1052
1053static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
1054 u32 len, struct completion *comp)
1055{
1056 struct sps_register_event sreg;
1057 int ret;
1058 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -06001059 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 return -ENODEV;
1061
1062
1063 ctrl->ports[pn].xcomp = comp;
1064 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
1065 sreg.mode = SPS_TRIGGER_WAIT;
1066 sreg.xfer_done = comp;
1067 sreg.callback = NULL;
1068 sreg.user = &ctrl->ports[pn];
1069 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
1070 if (ret) {
1071 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
1072 return ret;
1073 }
1074 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
1075 SPS_IOVEC_FLAG_INT);
1076 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
1077
1078 return ret;
1079}
1080
/*
 * msm_sat_define_ch() - handle satellite channel define/control requests.
 * @sat: satellite context whose client owns the channels.
 * @buf: raw user-message payload (channel numbers start at buf[5] for
 *	 CHAN_CTRL, at buf[8] for define requests — per the SAT protocol
 *	 used by this driver).
 * @len: message length in bytes.
 * @mc: message code (SLIM_USR_MC_CHAN_CTRL / _DEFINE_CHAN / _DEF_ACT_CHAN).
 *
 * For CHAN_CTRL, applies the requested operation to the first listed
 * channel (group semantics: controlling one member affects the group) and
 * bumps per-channel req_rem/req_def counters for bookkeeping that
 * RECONFIG_NOW later consumes.  For define requests, allocates/queries
 * channel handles, fills a slim_ch property block decoded from the raw
 * bytes, and defines (and optionally activates) the channel or group.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Find the tracked satellite channel named in buf[5] */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* Count pending remove/define per listed channel */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;
			/* Already-known channel? reuse its handle */
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			/* New channel: query a handle and record it.
			 * Note j == sat->nsatch here (loop ran to the end).
			 */
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Decode channel properties from the raw message bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* More than one channel listed -> define as a group;
		 * group handle is returned into chh[0].
		 */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1180
/*
 * msm_slim_rxwq() - process one received non-satellite message.
 * @dev: controller device.
 *
 * Dequeues a single message from the RX ring and dispatches on its
 * message code: device-present reports get a logical address assigned
 * (and satellite devices get re-queued to their satellite workqueue),
 * reply messages are forwarded to the framework, information reports and
 * anything unexpected are logged.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Header: 5-bit length, 3-bit message type, then code */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Enable runtime PM once the last expected MSM
			 * device has enumerated.
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
			if (ret)
				pr_err("assign laddr failed, error:%d", ret);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			/* Payload after the 4-byte header goes upstream */
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1248
/*
 * slim_sat_rxprocess() - workqueue handler draining a satellite's RX queue.
 * @work: embedded work item inside struct msm_slim_sat.
 *
 * Pops messages from the satellite queue and services the SAT user
 * protocol: present reports (capability handshake, subsystem-restart
 * cleanup), address queries, channel define/control, reconfiguration,
 * bandwidth requests and port connect/disconnect.  Most requests are
 * acknowledged with a GENERIC_ACK carrying the request's tid.
 *
 * Runtime-PM voting: 'satv' records whether this iteration took a
 * controller vote (msm_slim_get_ctrl); matching put calls are made on
 * every exit path of the loop body.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means a runtime-PM vote is held for this msg */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i, retries = 0;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					if (sat->satch[i].reconf) {
						pr_err("SSR, sat:%d, rm ch:%d",
							sat->satcl.laddr,
							sat->satch[i].chan);
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
						sat->satch[i].reconf = false;
					}
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			if (ret) {
				pr_err("capability for:0x%x fail:%d, retry:%d",
					sat->satcl.laddr, ret, retries);
				if (retries < INIT_MX_RETRIES) {
					msm_slim_wait_retry(dev);
					retries++;
					goto send_capability;
				} else {
					pr_err("failed after all retries:%d",
							ret);
				}
			} else {
				sat->sent_capability = true;
			}
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between the two layouts */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold a controller vote until RECONFIG_NOW */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Settle the per-channel remove/define bookkeeping
			 * accumulated by msm_sat_define_ch().
			 */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem && sch->reconf) {
					if (!ret) {
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
						sch->reconf = false;
					}
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					else
						sch->reconf = true;
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* falls through to default, which only breaks */
		default:
			break;
		}
		if (!gen_ack) {
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Acknowledge the request: tid + success/failure byte */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1486
Sagar Dharia790cfd02011-09-25 17:56:24 -06001487static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1488{
1489 struct msm_slim_sat *sat;
1490 char *name;
1491 if (dev->nsats >= MSM_MAX_NSATS)
1492 return NULL;
1493
1494 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1495 if (!sat) {
1496 dev_err(dev->dev, "no memory for satellite");
1497 return NULL;
1498 }
1499 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1500 if (!name) {
1501 dev_err(dev->dev, "no memory for satellite name");
1502 kfree(sat);
1503 return NULL;
1504 }
1505 dev->satd[dev->nsats] = sat;
1506 sat->dev = dev;
1507 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1508 sat->satcl.name = name;
1509 spin_lock_init(&sat->lock);
1510 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1511 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1512 if (!sat->wq) {
1513 kfree(name);
1514 kfree(sat);
1515 return NULL;
1516 }
1517 /*
1518 * Both sats will be allocated from RX thread and RX thread will
1519 * process messages sequentially. No synchronization necessary
1520 */
1521 dev->nsats++;
1522 return sat;
1523}
1524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001525static void
1526msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1527{
1528 u32 *buf = ev->data.transfer.user;
1529 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1530
1531 /*
1532 * Note the virtual address needs to be offset by the same index
1533 * as the physical address or just pass in the actual virtual address
1534 * if the sps_mem_buffer is not needed. Note that if completion is
1535 * used, the virtual address won't be available and will need to be
1536 * calculated based on the offset of the physical address
1537 */
1538 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1539
1540 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1541
1542 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1543 iovec->addr, iovec->size, iovec->flags);
1544
1545 } else {
1546 dev_err(dev->dev, "%s: unknown event %d\n",
1547 __func__, ev->event_id);
1548 }
1549}
1550
1551static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1552{
1553 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1554 msm_slim_rx_msgq_event(dev, notify);
1555}
1556
1557/* Queue up Rx message buffer */
1558static inline int
1559msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1560{
1561 int ret;
1562 u32 flags = SPS_IOVEC_FLAG_INT;
1563 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1564 struct sps_mem_buffer *mem = &endpoint->buf;
1565 struct sps_pipe *pipe = endpoint->sps;
1566
1567 /* Rx message queue buffers are 4 bytes in length */
1568 u8 *virt_addr = mem->base + (4 * ix);
1569 u32 phys_addr = mem->phys_base + (4 * ix);
1570
1571 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1572
1573 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1574 if (ret)
1575 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1576
1577 return ret;
1578}
1579
1580static inline int
1581msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1582{
1583 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1584 struct sps_mem_buffer *mem = &endpoint->buf;
1585 struct sps_pipe *pipe = endpoint->sps;
1586 struct sps_iovec iovec;
1587 int index;
1588 int ret;
1589
1590 ret = sps_get_iovec(pipe, &iovec);
1591 if (ret) {
1592 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1593 goto err_exit;
1594 }
1595
1596 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1597 iovec.addr, iovec.size, iovec.flags);
1598 BUG_ON(iovec.addr < mem->phys_base);
1599 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1600
1601 /* Calculate buffer index */
1602 index = (iovec.addr - mem->phys_base) / 4;
1603 *(data + offset) = *((u32 *)mem->base + index);
1604
1605 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1606
1607 /* Add buffer back to the queue */
1608 (void)msm_slim_post_rx_msgq(dev, index);
1609
1610err_exit:
1611 return ret;
1612}
1613
/*
 * msm_slim_rx_msgq_thread() - kthread assembling and dispatching RX messages.
 * @data: the msm_slim_ctrl this thread serves.
 *
 * Woken via dev->rx_msgq_notify.  In non-message-queue mode every wakeup
 * simply services the software RX ring.  In message-queue mode it reads
 * one 4-byte word per wakeup, accumulating a message in 'buffer'; the
 * first word carries length/type/code and selects generic vs satellite
 * delivery, and once (index * 4) covers the message length the whole
 * message is handed off.  Runs until kthread_should_stop().
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: 5-bit len, 3-bit type, 8-bit code */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		} else if ((index * 4) >= msg_len) {
			/* Message complete: dispatch and reset accumulator */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1677
1678static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1679{
1680 int i, ret;
1681 u32 pipe_offset;
1682 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1683 struct sps_connect *config = &endpoint->config;
1684 struct sps_mem_buffer *descr = &config->desc;
1685 struct sps_mem_buffer *mem = &endpoint->buf;
1686 struct completion *notify = &dev->rx_msgq_notify;
1687
1688 struct sps_register_event sps_error_event; /* SPS_ERROR */
1689 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1690
Sagar Dharia31ac5812012-01-04 11:38:59 -07001691 init_completion(notify);
1692 if (!dev->use_rx_msgqs)
1693 goto rx_thread_create;
1694
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001695 /* Allocate the endpoint */
1696 ret = msm_slim_init_endpoint(dev, endpoint);
1697 if (ret) {
1698 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1699 goto sps_init_endpoint_failed;
1700 }
1701
1702 /* Get the pipe indices for the message queues */
1703 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1704 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1705
1706 config->mode = SPS_MODE_SRC;
1707 config->source = dev->bam.hdl;
1708 config->destination = SPS_DEV_HANDLE_MEM;
1709 config->src_pipe_index = pipe_offset;
1710 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1711 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1712
1713 /* Allocate memory for the FIFO descriptors */
1714 ret = msm_slim_sps_mem_alloc(dev, descr,
1715 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1716 if (ret) {
1717 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1718 goto alloc_descr_failed;
1719 }
1720
1721 ret = sps_connect(endpoint->sps, config);
1722 if (ret) {
1723 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1724 goto sps_connect_failed;
1725 }
1726
1727 /* Register completion for DESC_DONE */
1728 init_completion(notify);
1729 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1730
1731 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1732 sps_descr_event.options = SPS_O_DESC_DONE;
1733 sps_descr_event.user = (void *)dev;
1734 sps_descr_event.xfer_done = notify;
1735
1736 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1737 if (ret) {
1738 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1739 goto sps_reg_event_failed;
1740 }
1741
1742 /* Register callback for errors */
1743 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1744 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1745 sps_error_event.options = SPS_O_ERROR;
1746 sps_error_event.user = (void *)dev;
1747 sps_error_event.callback = msm_slim_rx_msgq_cb;
1748
1749 ret = sps_register_event(endpoint->sps, &sps_error_event);
1750 if (ret) {
1751 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1752 goto sps_reg_event_failed;
1753 }
1754
1755 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1756 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1757 if (ret) {
1758 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1759 goto alloc_buffer_failed;
1760 }
1761
1762 /*
1763 * Call transfer_one for each 4-byte buffer
1764 * Use (buf->size/4) - 1 for the number of buffer to post
1765 */
1766
1767 /* Setup the transfer */
1768 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1769 ret = msm_slim_post_rx_msgq(dev, i);
1770 if (ret) {
1771 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1772 goto sps_transfer_failed;
1773 }
1774 }
1775
Sagar Dharia31ac5812012-01-04 11:38:59 -07001776rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 /* Fire up the Rx message queue thread */
1778 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1779 MSM_SLIM_NAME "_rx_msgq_thread");
1780 if (!dev->rx_msgq_thread) {
1781 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001782 /* Tear-down BAMs or return? */
1783 if (!dev->use_rx_msgqs)
1784 return -EIO;
1785 else
1786 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787 } else
1788 return 0;
1789
1790sps_transfer_failed:
1791 msm_slim_sps_mem_free(dev, mem);
1792alloc_buffer_failed:
1793 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1794 sps_register_event(endpoint->sps, &sps_error_event);
1795sps_reg_event_failed:
1796 sps_disconnect(endpoint->sps);
1797sps_connect_failed:
1798 msm_slim_sps_mem_free(dev, descr);
1799alloc_descr_failed:
1800 msm_slim_free_endpoint(endpoint);
1801sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001802 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001803 return ret;
1804}
1805
1806/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1807static int __devinit
1808msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1809{
1810 int i, ret;
1811 u32 bam_handle;
1812 struct sps_bam_props bam_props = {0};
1813
1814 static struct sps_bam_sec_config_props sec_props = {
1815 .ees = {
1816 [0] = { /* LPASS */
1817 .vmid = 0,
1818 .pipe_mask = 0xFFFF98,
1819 },
1820 [1] = { /* Krait Apps */
1821 .vmid = 1,
1822 .pipe_mask = 0x3F000007,
1823 },
1824 [2] = { /* Modem */
1825 .vmid = 2,
1826 .pipe_mask = 0x00000060,
1827 },
1828 },
1829 };
1830
1831 bam_props.ee = dev->ee;
1832 bam_props.virt_addr = dev->bam.base;
1833 bam_props.phys_addr = bam_mem->start;
1834 bam_props.irq = dev->bam.irq;
1835 bam_props.manage = SPS_BAM_MGR_LOCAL;
1836 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1837
1838 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1839 bam_props.p_sec_config_props = &sec_props;
1840
1841 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1842 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1843
1844 /* First 7 bits are for message Qs */
1845 for (i = 7; i < 32; i++) {
1846 /* Check what pipes are owned by Apps. */
1847 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1848 break;
1849 }
1850 dev->pipe_b = i - 7;
1851
1852 /* Register the BAM device with the SPS driver */
1853 ret = sps_register_bam_device(&bam_props, &bam_handle);
1854 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001855 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1856 dev->use_rx_msgqs = 0;
1857 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858 }
1859 dev->bam.hdl = bam_handle;
1860 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1861
Sagar Dharia31ac5812012-01-04 11:38:59 -07001862init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001863 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001864 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001865 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001866 if (ret && bam_handle) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001867 sps_deregister_bam_device(bam_handle);
1868 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001870 return ret;
1871}
1872
1873static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1874{
1875 if (dev->use_rx_msgqs) {
1876 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1877 struct sps_connect *config = &endpoint->config;
1878 struct sps_mem_buffer *descr = &config->desc;
1879 struct sps_mem_buffer *mem = &endpoint->buf;
1880 struct sps_register_event sps_event;
1881 memset(&sps_event, 0x00, sizeof(sps_event));
1882 msm_slim_sps_mem_free(dev, mem);
1883 sps_register_event(endpoint->sps, &sps_event);
1884 sps_disconnect(endpoint->sps);
1885 msm_slim_sps_mem_free(dev, descr);
1886 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001887 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889}
1890
Sagar Dhariacc969452011-09-19 10:34:30 -06001891static void msm_slim_prg_slew(struct platform_device *pdev,
1892 struct msm_slim_ctrl *dev)
1893{
1894 struct resource *slew_io;
1895 void __iomem *slew_reg;
1896 /* SLEW RATE register for this slimbus */
1897 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1898 "slimbus_slew_reg");
1899 if (!dev->slew_mem) {
1900 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1901 return;
1902 }
1903 slew_io = request_mem_region(dev->slew_mem->start,
1904 resource_size(dev->slew_mem), pdev->name);
1905 if (!slew_io) {
1906 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1907 dev->slew_mem = NULL;
1908 return;
1909 }
1910
1911 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1912 if (!slew_reg) {
1913 dev_dbg(dev->dev, "slew register mapping failed");
1914 release_mem_region(dev->slew_mem->start,
1915 resource_size(dev->slew_mem));
1916 dev->slew_mem = NULL;
1917 return;
1918 }
1919 writel_relaxed(1, slew_reg);
1920 /* Make sure slimbus-slew rate enabling goes through */
1921 wmb();
1922 iounmap(slew_reg);
1923}
1924
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001925static int __devinit msm_slim_probe(struct platform_device *pdev)
1926{
1927 struct msm_slim_ctrl *dev;
1928 int ret;
1929 struct resource *bam_mem, *bam_io;
1930 struct resource *slim_mem, *slim_io;
1931 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001932 bool rxreg_access = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001933 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1934 "slimbus_physical");
1935 if (!slim_mem) {
1936 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1937 return -ENODEV;
1938 }
1939 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1940 pdev->name);
1941 if (!slim_io) {
1942 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1943 return -EBUSY;
1944 }
1945
1946 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1947 "slimbus_bam_physical");
1948 if (!bam_mem) {
1949 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1950 ret = -ENODEV;
1951 goto err_get_res_bam_failed;
1952 }
1953 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1954 pdev->name);
1955 if (!bam_io) {
1956 release_mem_region(slim_mem->start, resource_size(slim_mem));
1957 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1958 ret = -EBUSY;
1959 goto err_get_res_bam_failed;
1960 }
1961 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1962 "slimbus_irq");
1963 if (!irq) {
1964 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1965 ret = -ENODEV;
1966 goto err_get_res_failed;
1967 }
1968 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1969 "slimbus_bam_irq");
1970 if (!bam_irq) {
1971 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1972 ret = -ENODEV;
1973 goto err_get_res_failed;
1974 }
1975
1976 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1977 if (!dev) {
1978 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1979 ret = -ENOMEM;
1980 goto err_get_res_failed;
1981 }
1982 dev->dev = &pdev->dev;
1983 platform_set_drvdata(pdev, dev);
1984 slim_set_ctrldata(&dev->ctrl, dev);
1985 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1986 if (!dev->base) {
1987 dev_err(&pdev->dev, "IOremap failed\n");
1988 ret = -ENOMEM;
1989 goto err_ioremap_failed;
1990 }
1991 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1992 if (!dev->bam.base) {
1993 dev_err(&pdev->dev, "BAM IOremap failed\n");
1994 ret = -ENOMEM;
1995 goto err_ioremap_bam_failed;
1996 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001997 if (pdev->dev.of_node) {
1998
1999 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
2000 &dev->ctrl.nr);
2001 if (ret) {
2002 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
2003 goto err_of_init_failed;
2004 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06002005 rxreg_access = of_property_read_bool(pdev->dev.of_node,
2006 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002007 /* Optional properties */
2008 ret = of_property_read_u32(pdev->dev.of_node,
2009 "qcom,min-clk-gear", &dev->ctrl.min_cg);
2010 ret = of_property_read_u32(pdev->dev.of_node,
2011 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06002012 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
2013 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002014 } else {
2015 dev->ctrl.nr = pdev->id;
2016 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 dev->ctrl.nchans = MSM_SLIM_NCHANS;
2018 dev->ctrl.nports = MSM_SLIM_NPORTS;
2019 dev->ctrl.set_laddr = msm_set_laddr;
2020 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06002021 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022 dev->ctrl.config_port = msm_config_port;
2023 dev->ctrl.port_xfer = msm_slim_port_xfer;
2024 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
2025 /* Reserve some messaging BW for satellite-apps driver communication */
2026 dev->ctrl.sched.pending_msgsl = 30;
2027
2028 init_completion(&dev->reconf);
2029 mutex_init(&dev->tx_lock);
2030 spin_lock_init(&dev->rx_lock);
2031 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06002032 if (rxreg_access)
2033 dev->use_rx_msgqs = 0;
2034 else
2035 dev->use_rx_msgqs = 1;
2036
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037 dev->irq = irq->start;
2038 dev->bam.irq = bam_irq->start;
2039
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002040 dev->hclk = clk_get(dev->dev, "iface_clk");
2041 if (IS_ERR(dev->hclk))
2042 dev->hclk = NULL;
2043 else
2044 clk_prepare_enable(dev->hclk);
2045
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046 ret = msm_slim_sps_init(dev, bam_mem);
2047 if (ret != 0) {
2048 dev_err(dev->dev, "error SPS init\n");
2049 goto err_sps_init_failed;
2050 }
2051
2052
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002053 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
2054 dev->framer.superfreq =
2055 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
2056 dev->ctrl.a_framer = &dev->framer;
2057 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002058 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002059 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002060
2061 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
2062 "msm_slim_irq", dev);
2063 if (ret) {
2064 dev_err(&pdev->dev, "request IRQ failed\n");
2065 goto err_request_irq_failed;
2066 }
2067
Sagar Dhariacc969452011-09-19 10:34:30 -06002068 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002069
2070 /* Register with framework before enabling frame, clock */
2071 ret = slim_add_numbered_controller(&dev->ctrl);
2072 if (ret) {
2073 dev_err(dev->dev, "error adding controller\n");
2074 goto err_ctrl_failed;
2075 }
2076
2077
Tianyi Gou44a81b02012-02-06 17:49:07 -08002078 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002079 if (!dev->rclk) {
2080 dev_err(dev->dev, "slimbus clock not found");
2081 goto err_clk_get_failed;
2082 }
Sagar Dhariacc969452011-09-19 10:34:30 -06002083 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002084 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002085
Sagar Dharia82e516f2012-03-16 16:01:23 -06002086 dev->ver = readl_relaxed(dev->base);
2087 /* Version info in 16 MSbits */
2088 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002090 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002092 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002093
2094 /*
2095 * Manager register initialization
2096 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2097 */
2098 if (dev->use_rx_msgqs)
2099 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2100 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2101 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2102 else
2103 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2104 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2105 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2106 writel_relaxed(1, dev->base + MGR_CFG);
2107 /*
2108 * Framer registers are beyond 1K memory region after Manager and/or
2109 * component registers. Make sure those writes are ordered
2110 * before framer register writes
2111 */
2112 wmb();
2113
2114 /* Framer register initialization */
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002115 writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
2116 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117 dev->base + FRM_CFG);
2118 /*
2119 * Make sure that framer wake-up and enabling writes go through
2120 * before any other component is enabled. Framer is responsible for
2121 * clocking the bus and enabling framer first will ensure that other
2122 * devices can report presence when they are enabled
2123 */
2124 mb();
2125
2126 /* Enable RX msg Q */
2127 if (dev->use_rx_msgqs)
2128 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2129 dev->base + MGR_CFG);
2130 else
2131 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2132 /*
2133 * Make sure that manager-enable is written through before interface
2134 * device is enabled
2135 */
2136 mb();
2137 writel_relaxed(1, dev->base + INTF_CFG);
2138 /*
2139 * Make sure that interface-enable is written through before enabling
2140 * ported generic device inside MSM manager
2141 */
2142 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002143 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2144 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2145 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146 /*
2147 * Make sure that ported generic device is enabled and port-EE settings
2148 * are written through before finally enabling the component
2149 */
2150 mb();
2151
Sagar Dharia82e516f2012-03-16 16:01:23 -06002152 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002153 /*
2154 * Make sure that all writes have gone through before exiting this
2155 * function
2156 */
2157 mb();
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002158 if (pdev->dev.of_node)
2159 of_register_slim_devices(&dev->ctrl);
2160
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002161 pm_runtime_use_autosuspend(&pdev->dev);
2162 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2163 pm_runtime_set_active(&pdev->dev);
2164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2166 return 0;
2167
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002168err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002169 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002170err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002171 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002173 msm_slim_sps_exit(dev);
2174err_sps_init_failed:
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002175 if (dev->hclk) {
2176 clk_disable_unprepare(dev->hclk);
2177 clk_put(dev->hclk);
2178 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002179err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180 iounmap(dev->bam.base);
2181err_ioremap_bam_failed:
2182 iounmap(dev->base);
2183err_ioremap_failed:
2184 kfree(dev);
2185err_get_res_failed:
2186 release_mem_region(bam_mem->start, resource_size(bam_mem));
2187err_get_res_bam_failed:
2188 release_mem_region(slim_mem->start, resource_size(slim_mem));
2189 return ret;
2190}
2191
2192static int __devexit msm_slim_remove(struct platform_device *pdev)
2193{
2194 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2195 struct resource *bam_mem;
2196 struct resource *slim_mem;
Sagar Dhariacc969452011-09-19 10:34:30 -06002197 struct resource *slew_mem = dev->slew_mem;
Sagar Dharia790cfd02011-09-25 17:56:24 -06002198 int i;
2199 for (i = 0; i < dev->nsats; i++) {
2200 struct msm_slim_sat *sat = dev->satd[i];
Sagar Dharia0ffdca12011-09-25 18:55:53 -06002201 int j;
2202 for (j = 0; j < sat->nsatch; j++)
2203 slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
Sagar Dharia790cfd02011-09-25 17:56:24 -06002204 slim_remove_device(&sat->satcl);
2205 kfree(sat->satch);
2206 destroy_workqueue(sat->wq);
2207 kfree(sat->satcl.name);
2208 kfree(sat);
2209 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002210 pm_runtime_disable(&pdev->dev);
2211 pm_runtime_set_suspended(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002212 free_irq(dev->irq, dev);
2213 slim_del_controller(&dev->ctrl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002214 clk_put(dev->rclk);
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002215 if (dev->hclk)
2216 clk_put(dev->hclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217 msm_slim_sps_exit(dev);
2218 kthread_stop(dev->rx_msgq_thread);
2219 iounmap(dev->bam.base);
2220 iounmap(dev->base);
2221 kfree(dev);
2222 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2223 "slimbus_bam_physical");
Sagar Dhariae77961f2011-09-27 14:03:50 -06002224 if (bam_mem)
2225 release_mem_region(bam_mem->start, resource_size(bam_mem));
Sagar Dhariacc969452011-09-19 10:34:30 -06002226 if (slew_mem)
2227 release_mem_region(slew_mem->start, resource_size(slew_mem));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002228 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2229 "slimbus_physical");
Sagar Dhariae77961f2011-09-27 14:03:50 -06002230 if (slim_mem)
2231 release_mem_region(slim_mem->start, resource_size(slim_mem));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232 return 0;
2233}
2234
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: request a delayed (autosuspend) suspend and
 * return -EAGAIN so the PM core does not suspend the device synchronously.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
#endif
2243
/*
 * If PM_RUNTIME is not defined, these two functions serve as helpers
 * called from system suspend/resume, so they are kept outside the
 * ifdef CONFIG_PM_RUNTIME block.
 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002249#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002250static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002251{
2252 struct platform_device *pdev = to_platform_device(device);
2253 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002254 int ret;
2255 dev_dbg(device, "pm_runtime: suspending...\n");
2256 dev->state = MSM_CTRL_SLEEPING;
2257 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002258 if (ret) {
2259 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002260 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002261 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002262 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002263 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002264 return ret;
2265}
2266
2267static int msm_slim_runtime_resume(struct device *device)
2268{
2269 struct platform_device *pdev = to_platform_device(device);
2270 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2271 int ret = 0;
2272 dev_dbg(device, "pm_runtime: resuming...\n");
2273 if (dev->state == MSM_CTRL_ASLEEP)
2274 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002275 if (ret) {
2276 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002277 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002278 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002279 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002280 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002281 return ret;
2282}
2283
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002284static int msm_slim_suspend(struct device *dev)
2285{
2286 int ret = 0;
2287 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002288 struct platform_device *pdev = to_platform_device(dev);
2289 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002290 dev_dbg(dev, "system suspend");
2291 ret = msm_slim_runtime_suspend(dev);
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002292 if (!ret) {
2293 if (cdev->hclk)
2294 clk_disable_unprepare(cdev->hclk);
2295 }
Sagar Dharia6b559e02011-08-03 17:01:31 -06002296 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002297 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002298 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002299 * If the clock pause failed due to active channels, there is
2300 * a possibility that some audio stream is active during suspend
2301 * We dont want to return suspend failure in that case so that
2302 * display and relevant components can still go to suspend.
2303 * If there is some other error, then it should be passed-on
2304 * to system level suspend
2305 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002306 ret = 0;
2307 }
2308 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002309}
2310
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002311static int msm_slim_resume(struct device *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002312{
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002313 /* If runtime_pm is enabled, this resume shouldn't do anything */
2314 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002315 struct platform_device *pdev = to_platform_device(dev);
2316 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002317 int ret;
2318 dev_dbg(dev, "system resume");
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002319 if (cdev->hclk)
2320 clk_prepare_enable(cdev->hclk);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002321 ret = msm_slim_runtime_resume(dev);
2322 if (!ret) {
2323 pm_runtime_mark_last_busy(dev);
2324 pm_request_autosuspend(dev);
2325 }
2326 return ret;
2327
Sagar Dharia144e5e02011-08-08 17:30:11 -06002328 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002329 return 0;
2330}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002331#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002332
/*
 * PM callbacks: system sleep uses msm_slim_suspend/resume (which fall back
 * to the runtime helpers when runtime PM is disabled); runtime PM uses the
 * clock-pause based suspend/resume with autosuspend idling.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2344
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002345static struct of_device_id msm_slim_dt_match[] = {
2346 {
2347 .compatible = "qcom,slim-msm",
2348 },
2349 {}
2350};
2351
/* Platform driver glue; bound via the DT match table or MSM_SLIM_NAME */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2362
/* Register at subsys_initcall time so the bus controller is available
 * before client drivers probe. */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
2368
/* Module unload: unregister the platform driver. */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
2374
2375MODULE_LICENSE("GPL v2");
2376MODULE_VERSION("0.1");
2377MODULE_DESCRIPTION("MSM Slimbus controller");
2378MODULE_ALIAS("platform:msm-slim");