blob: 4e8312b407a23cfe9c91ede584a32817775a778a [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
28
/* Per spec, max 40 bytes per received message */
#define SLIM_RX_MSGQ_BUF_LEN	40

/* User-defined (MT_DEST/SRC_REFERRED_USER) message codes exchanged with
 * satellite masters. Values are protocol constants, not hardware offsets. */
#define SLIM_USR_MC_GENERIC_ACK		0x25
#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
#define SLIM_USR_MC_REPORT_SATELLITE	0x1
#define SLIM_USR_MC_ADDR_QUERY		0xD
#define SLIM_USR_MC_ADDR_REPLY		0xE
#define SLIM_USR_MC_DEFINE_CHAN		0x20
#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
#define SLIM_USR_MC_CHAN_CTRL		0x23
#define SLIM_USR_MC_RECONFIG_NOW	0x24
#define SLIM_USR_MC_REQ_BW		0x28
#define SLIM_USR_MC_CONNECT_SRC		0x2C
#define SLIM_USR_MC_CONNECT_SINK	0x2D
#define SLIM_USR_MC_DISCONNECT_PORT	0x2E

/* MSM Slimbus peripheral settings */
#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
#define MSM_SLIM_NCHANS			32
#define MSM_SLIM_NPORTS			24
/* Runtime-PM autosuspend delay, in milliseconds */
#define MSM_SLIM_AUTOSUSPEND		MSEC_PER_SEC

/*
 * Need enough descriptors to receive present messages from slaves
 * if received simultaneously. Present message needs 3 descriptors
 * and this size will ensure around 10 simultaneous reports.
 */
#define MSM_SLIM_DESC_NUM		32

/* Assemble the first word of a SLIMbus message:
 * length | msg-type | msg-code | dest-type | address */
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define MSM_SLIM_NAME	"msm_slim_ctrl"
/* SLIMbus root clock frequency in Hz */
#define SLIM_ROOT_FREQ	24576000

/* Depth of the manager-side and satellite-side RX software queues */
#define MSM_CONCUR_MSG	8
#define SAT_CONCUR_MSG	8
/* Default port configuration bits written to PGD_PORT_CFGn */
#define DEF_WATERMARK	(8 << 1)
#define DEF_ALIGN	0
#define DEF_PACK	(1 << 6)
#define ENABLE_PORT	1

#define DEF_BLKSZ	0
#define DEF_TRANSZ	0

/* Satellite capability-message magic/version/protocol constants */
#define SAT_MAGIC_LSB	0xD9
#define SAT_MAGIC_MSB	0xC5
#define SAT_MSG_VER	0x1
#define SAT_MSG_PROT	0x1
#define MSM_SAT_SUCCSS	0x20	/* sic: satellite "success" status code */
#define MSM_MAX_NSATS	2
#define MSM_MAX_SATCH	32

/* Qualcomm enumeration-address components used to recognize devices */
#define QC_MFGID_LSB	0x2
#define QC_MFGID_MSB	0x17
#define QC_CHIPID_SL	0x10
#define QC_DEVID_SAT1	0x3
#define QC_DEVID_SAT2	0x4
#define QC_DEVID_PGD	0x5
#define QC_MSM_DEVS	5
#define INIT_MX_RETRIES	10
/* Minimum retry delay, in milliseconds (see msm_slim_wait_retry) */
#define DEF_RETRY_MS	10

/* Version-dispatching register accessors: v selects the V2 (non-zero) or
 * V1 (zero) register map. All expand to addresses relative to dev->base. */
#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))

/* V2 map: per-EE stride is 0x1000, per-port stride is 0x1000 */
#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
#define CFG_PORT_V2(r) ((r ## _V2))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700100/* Component registers */
/* Component register offsets, V2 register map */
enum comp_reg_v2 {
	COMP_CFG_V2		= 4,
	COMP_TRUST_CFG_V2	= 0x3000,
};
105
106/* Manager PGD registers */
/* Manager PGD (ported generic device) register offsets, V2 register map.
 * The *n_V2 entries are base offsets; per-port/per-EE strides are applied
 * by the PGD_PORT_V2/PGD_THIS_EE_V2 macros. */
enum pgd_reg_v2 {
	PGD_CFG_V2		= 0x800,
	PGD_STAT_V2		= 0x804,
	PGD_INT_EN_V2		= 0x810,
	PGD_INT_STAT_V2		= 0x814,
	PGD_INT_CLR_V2		= 0x818,
	PGD_OWN_EEn_V2		= 0x300C,
	PGD_PORT_INT_EN_EEn_V2	= 0x5000,
	PGD_PORT_INT_ST_EEn_V2	= 0x5004,
	PGD_PORT_INT_CL_EEn_V2	= 0x5008,
	PGD_PORT_CFGn_V2	= 0x14000,
	PGD_PORT_STATn_V2	= 0x14004,
	PGD_PORT_PARAMn_V2	= 0x14008,
	PGD_PORT_BLKn_V2	= 0x1400C,
	PGD_PORT_TRANn_V2	= 0x14010,
	PGD_PORT_MCHANn_V2	= 0x14014,
	PGD_PORT_PSHPLLn_V2	= 0x14018,
	PGD_PORT_PC_CFGn_V2	= 0x8000,
	PGD_PORT_PC_VALn_V2	= 0x8004,
	PGD_PORT_PC_VFR_TSn_V2	= 0x8008,
	PGD_PORT_PC_VFR_STn_V2	= 0x800C,
	PGD_PORT_PC_VFR_CLn_V2	= 0x8010,
	PGD_IE_STAT_V2		= 0x820,
	PGD_VE_STAT_V2		= 0x830,
};
132
/* V1 map: per-EE stride is 16 bytes, per-port stride is 32 bytes */
#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
#define CFG_PORT_V1(r) ((r ## _V1))
/* Component register offsets, V1 register map */
enum comp_reg_v1 {
	COMP_CFG_V1		= 0,
	COMP_TRUST_CFG_V1	= 0x14,
};
141
142/* Manager PGD registers */
/* Manager PGD register offsets, V1 register map (see enum pgd_reg_v2 for
 * the V2 equivalents; PGD_PORT/PGD_THIS_EE pick between the two). */
enum pgd_reg_v1 {
	PGD_CFG_V1		= 0x1000,
	PGD_STAT_V1		= 0x1004,
	PGD_INT_EN_V1		= 0x1010,
	PGD_INT_STAT_V1		= 0x1014,
	PGD_INT_CLR_V1		= 0x1018,
	PGD_OWN_EEn_V1		= 0x1020,
	PGD_PORT_INT_EN_EEn_V1	= 0x1030,
	PGD_PORT_INT_ST_EEn_V1	= 0x1034,
	PGD_PORT_INT_CL_EEn_V1	= 0x1038,
	PGD_PORT_CFGn_V1	= 0x1080,
	PGD_PORT_STATn_V1	= 0x1084,
	PGD_PORT_PARAMn_V1	= 0x1088,
	PGD_PORT_BLKn_V1	= 0x108C,
	PGD_PORT_TRANn_V1	= 0x1090,
	PGD_PORT_MCHANn_V1	= 0x1094,
	PGD_PORT_PSHPLLn_V1	= 0x1098,
	PGD_PORT_PC_CFGn_V1	= 0x1600,
	PGD_PORT_PC_VALn_V1	= 0x1604,
	PGD_PORT_PC_VFR_TSn_V1	= 0x1608,
	PGD_PORT_PC_VFR_STn_V1	= 0x160C,
	PGD_PORT_PC_VFR_CLn_V1	= 0x1610,
	PGD_IE_STAT_V1		= 0x1700,
	PGD_VE_STAT_V1		= 0x1710,
};
168
169/* Manager registers */
/* Manager register offsets (version-independent), relative to dev->base */
enum mgr_reg {
	MGR_CFG		= 0x200,
	MGR_STATUS	= 0x204,
	MGR_RX_MSGQ_CFG	= 0x208,
	MGR_INT_EN	= 0x210,
	MGR_INT_STAT	= 0x214,
	MGR_INT_CLR	= 0x218,
	MGR_TX_MSG	= 0x230,	/* TX message FIFO window */
	MGR_RX_MSG	= 0x270,	/* RX message FIFO window */
	MGR_IE_STAT	= 0x2F0,
	MGR_VE_STAT	= 0x300,
};
182
/* Bit fields for the MGR_CFG register */
enum msg_cfg {
	MGR_CFG_ENABLE		= 1,
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
};
189/* Message queue types */
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,
	MSGQ_TX_LOW	= 1,
	MSGQ_TX_HIGH	= 2,
};
195/* Framer registers */
/* Framer register offsets, relative to dev->base */
enum frm_reg {
	FRM_CFG		= 0x400,
	FRM_STAT	= 0x404,
	FRM_INT_EN	= 0x410,
	FRM_INT_STAT	= 0x414,
	FRM_INT_CLR	= 0x418,
	FRM_WAKEUP	= 0x41C,
	FRM_CLKCTL_DONE	= 0x420,
	FRM_IE_STAT	= 0x430,
	FRM_VE_STAT	= 0x440,
};
207
208/* Interface registers */
/* Interface-device register offsets, relative to dev->base */
enum intf_reg {
	INTF_CFG	= 0x600,
	INTF_STAT	= 0x604,
	INTF_INT_EN	= 0x610,
	INTF_INT_STAT	= 0x614,
	INTF_INT_CLR	= 0x618,
	INTF_IE_STAT	= 0x630,
	INTF_VE_STAT	= 0x640,
};
218
/* Resource-group field values used when assigning port ownership per EE */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};
224
/* Manager interrupt status/clear bits (MGR_INT_STAT / MGR_INT_CLR) */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};
232
/* Bit positions within the FRM_CFG register */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
	INTR_WAKE	= 19,
};
240
/* Controller power state; checked in msm_xfer_msg before transfers.
 * SLEEPING is the transient system-suspend state, ASLEEP is fully idle. */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
246
/* Handle and resources for the SPS/BAM DMA engine backing the data pipes */
struct msm_slim_sps_bam {
	u32 hdl;		/* handle returned by the SPS driver */
	void __iomem *base;
	int irq;
};
252
/* One SPS endpoint (pipe) and its connection state */
struct msm_slim_endp {
	struct sps_pipe *sps;
	struct sps_connect config;
	struct sps_register_event event;
	struct sps_mem_buffer buf;
	struct completion *xcomp;
	bool connected;		/* true after successful sps_connect() */
};
261
/*
 * Per-controller driver state. Embeds the generic slim_controller and
 * slim_framer, and adds MSM-specific message queues, SPS pipes, clocking
 * and power-management bookkeeping.
 */
struct msm_slim_ctrl {
	struct slim_controller ctrl;
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* ioremapped register base */
	struct resource *slew_mem;
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single static TX message buffer */
	/* RX software ring: head/tail index rx_msgs, guarded by rx_lock */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
	spinlock_t rx_lock;
	int head;
	int tail;
	int irq;
	int err;			/* last TX error reported by the ISR */
	int ee;				/* execution environment index */
	struct completion *wr_comp;	/* completed by ISR on TX done/NACK */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];
	struct msm_slim_endp pipes[7];
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;
	struct clk *hclk;
	struct mutex tx_lock;		/* serializes message transmission */
	u8 pgdla;			/* logical address of the PGD */
	bool use_rx_msgqs;
	int pipe_b;			/* first HW port backing pipes[0] */
	struct completion reconf;
	bool reconf_busy;
	bool chan_active;		/* data-channel runtime-PM vote held */
	enum msm_ctrl_state state;
	int nsats;
	u32 ver;			/* register-map version selector */
};
298
/* Book-keeping for one channel owned by a satellite master */
struct msm_sat_chan {
	u8 chan;	/* channel number on the bus */
	u16 chanh;	/* handle from the slimbus core */
	int req_rem;	/* pending removal requests */
	int req_def;	/* pending define requests */
	bool reconf;
};
306
/* Per-satellite state: its slim device, a workqueue that processes
 * messages the ISR enqueues into sat_msgs, and its channel table. */
struct msm_slim_sat {
	struct slim_device satcl;
	struct msm_slim_ctrl *dev;
	struct workqueue_struct *wq;
	struct work_struct wd;
	/* ring of raw satellite messages; shead/stail guarded by lock */
	u8 sat_msgs[SAT_CONCUR_MSG][40];
	struct msm_sat_chan *satch;
	u8 nsatch;
	bool sent_capability;
	bool pending_reconf;
	bool pending_capability;
	int shead;
	int stail;
	spinlock_t lock;
};
322
Sagar Dharia790cfd02011-09-25 17:56:24 -0600323static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700325static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
326{
327 spin_lock(&dev->rx_lock);
328 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
329 spin_unlock(&dev->rx_lock);
330 dev_err(dev->dev, "RX QUEUE full!");
331 return -EXFULL;
332 }
333 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
334 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
335 spin_unlock(&dev->rx_lock);
336 return 0;
337}
338
339static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
340{
341 unsigned long flags;
342 spin_lock_irqsave(&dev->rx_lock, flags);
343 if (dev->tail == dev->head) {
344 spin_unlock_irqrestore(&dev->rx_lock, flags);
345 return -ENODATA;
346 }
347 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
348 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
349 spin_unlock_irqrestore(&dev->rx_lock, flags);
350 return 0;
351}
352
353static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
354{
355 struct msm_slim_ctrl *dev = sat->dev;
356 spin_lock(&sat->lock);
357 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
358 spin_unlock(&sat->lock);
359 dev_err(dev->dev, "SAT QUEUE full!");
360 return -EXFULL;
361 }
362 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
363 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
364 spin_unlock(&sat->lock);
365 return 0;
366}
367
368static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
369{
370 unsigned long flags;
371 spin_lock_irqsave(&sat->lock, flags);
372 if (sat->stail == sat->shead) {
373 spin_unlock_irqrestore(&sat->lock, flags);
374 return -ENODATA;
375 }
376 memcpy(buf, sat->sat_msgs[sat->shead], 40);
377 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
378 spin_unlock_irqrestore(&sat->lock, flags);
379 return 0;
380}
381
382static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
383{
384 e_addr[0] = (buffer[1] >> 24) & 0xff;
385 e_addr[1] = (buffer[1] >> 16) & 0xff;
386 e_addr[2] = (buffer[1] >> 8) & 0xff;
387 e_addr[3] = buffer[1] & 0xff;
388 e_addr[4] = (buffer[0] >> 24) & 0xff;
389 e_addr[5] = (buffer[0] >> 16) & 0xff;
390}
391
392static bool msm_is_sat_dev(u8 *e_addr)
393{
394 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
395 e_addr[2] != QC_CHIPID_SL &&
396 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
397 return true;
398 return false;
399}
400
/*
 * Take a runtime-PM vote on the controller device (resumes it if needed).
 * Returns the pm_runtime_get_sync() result (>= 0 on success), -ENODEV if
 * the usage count is unexpectedly non-positive after a successful get, or
 * -ENODEV unconditionally when runtime PM is compiled out.
 * Callers treat a negative return as "no vote taken" (see msm_xfer_msg).
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		/* Sanity-check the usage count; <= 0 here means the PM
		 * refcounting is out of balance. */
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a runtime-PM vote taken by msm_slim_get_ctrl. Marks last-busy so
 * autosuspend timing restarts, and refuses to drop below a zero usage
 * count (logs the imbalance instead). No-op when runtime PM is disabled.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
430
Sagar Dharia790cfd02011-09-25 17:56:24 -0600431static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
432{
433 struct msm_slim_sat *sat = NULL;
434 int i = 0;
435 while (!sat && i < dev->nsats) {
436 if (laddr == dev->satd[i]->satcl.laddr)
437 sat = dev->satd[i];
438 i++;
439 }
440 return sat;
441}
442
/*
 * Top-level interrupt handler for the SLIMbus manager.
 * Handles, in order: TX completion/NACK, RX message arrival (dispatched by
 * message type/code), reconfiguration-done, and per-port status events.
 * Every interrupt-clear register write is followed by mb() so the clear is
 * posted before completions are signalled or work is queued.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* TX was NACKed: capture a full register dump of
			 * manager, framer and interface state for debug. */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);

			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);

			/* msm_xfer_msg picks this up after its completion */
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* First word carries length (bits 0-4), message type (5-7)
		 * and message code (8-15); read the rest word by word. */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User messages are routed to the matching satellite
			 * and processed later on its workqueue. */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_ABSENT) {
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);

		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Information report: log the element code and the
			 * trailing bit-mask bytes, then discard. */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupts: decode error bits from each flagged port's
	 * status register (bit 19: disconnect, bit 2: overflow, bit 3:
	 * underflow, per the err values assigned below). */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
						SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
						SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
634
635static int
636msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
637{
638 int ret;
639 struct sps_pipe *endpoint;
640 struct sps_connect *config = &ep->config;
641
642 /* Allocate the endpoint */
643 endpoint = sps_alloc_endpoint();
644 if (!endpoint) {
645 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
646 return -ENOMEM;
647 }
648
649 /* Get default connection configuration for an endpoint */
650 ret = sps_get_config(endpoint, config);
651 if (ret) {
652 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
653 goto sps_config_failed;
654 }
655
656 ep->sps = endpoint;
657 return 0;
658
659sps_config_failed:
660 sps_free_endpoint(endpoint);
661 return ret;
662}
663
/* Release the endpoint's SPS pipe and clear the stale handle. */
static void
msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
	sps_free_endpoint(ep->sps);
	ep->sps = NULL;
}
670
671static int msm_slim_sps_mem_alloc(
672 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
673{
674 dma_addr_t phys;
675
676 mem->size = len;
677 mem->min_size = 0;
678 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
679
680 if (!mem->base) {
681 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
682 return -ENOMEM;
683 }
684
685 mem->phys_base = phys;
686 memset(mem->base, 0x00, mem->size);
687 return 0;
688}
689
690static void
691msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
692{
693 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
694 mem->size = 0;
695 mem->base = NULL;
696 mem->phys_base = 0;
697}
698
/*
 * Program default configuration for hardware port pn (watermark, packing,
 * enable; zero block/transaction sizes) and unmask its interrupt for this
 * EE. The final mb() ensures all register writes are posted on return.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write of the per-EE port interrupt enable mask */
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
				dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
712
/*
 * Connect logical port pn to its BAM pipe. Reads the hardware pipe number
 * from the port status register, sets up the SPS connection in the
 * direction given by the port's flow (SLIM_SRC: memory -> BAM, else
 * BAM -> memory), and on success marks the pipe connected and programs
 * the hardware port. Returns 0 or a negative SPS error.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe: just refresh its options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
						ret);
			return ret;
		}
	}

	/* Hardware pipe index lives in bits 4-11 of the port status */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
764
765static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
766{
767 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
768 /*
769 * Currently we block a transaction until the current one completes.
770 * In case we need multiple transactions, use message Q
771 */
772 return dev->tx_buf;
773}
774
775static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
776{
777 int i;
778 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
779 for (i = 0; i < (len + 3) >> 2; i++) {
780 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
781 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
782 }
783 /* Guarantee that message is sent before returning */
784 mb();
785 return 0;
786}
787
/*
 * Transmit one SLIMbus transaction and wait for its completion.
 * Sequencing: take a runtime-PM "messaging" vote (unless this is part of a
 * clock-pause sequence), serialize under tx_lock, assemble the message in
 * the shared TX buffer, send it, and wait up to 1s for the ISR to signal
 * the on-stack completion. Reconfiguration and clock-pause messages get
 * extra handling (waiting for reconfiguration-done, gating the clock).
 * Returns 0/dev->err on success, -EBUSY if suspended, -ETIMEDOUT on
 * timeout, -EPROTONOSUPPORT for enumeration-address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* >= 0 iff a messaging PM vote was taken */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	/* Refuse non-clock-pause traffic while suspended/suspending */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Only one reconfiguration may be in flight at a time */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* Port connect/disconnect aimed at "0xFF" goes to the PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Value/information requests carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/* Disconnect: tear down the SPS pipe locally and
			 * skip sending anything on the bus. */
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate logical port to hardware port number in-place */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);
	/* On timeout, detach the on-stack completion before it goes away */
	if (!timeout)
		dev->wr_comp = NULL;
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* Clock-pause reconfiguration: wait for reconfig-done, then
		 * gate the root clock and IRQ. */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* Last data channel removed: drop the channel vote */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	if (!timeout) {
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x",
				txn->mc, txn->mt);
		dev->wr_comp = NULL;
	}

	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	return timeout ? dev->err : -ETIMEDOUT;
}
947
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600948static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
949{
950 int msec_per_frm = 0;
951 int sfr_per_sec;
952 /* Wait for 1 superframe, or default time and then retry */
953 sfr_per_sec = dev->framer.superfreq /
954 (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
955 if (sfr_per_sec)
956 msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
957 if (msec_per_frm < DEF_RETRY_MS)
958 msec_per_frm = DEF_RETRY_MS;
959 msleep(msec_per_frm);
960}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700961static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
962 u8 elen, u8 laddr)
963{
964 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600965 struct completion done;
966 int timeout, ret, retries = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967 u32 *buf;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600968retry_laddr:
969 init_completion(&done);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700970 mutex_lock(&dev->tx_lock);
971 buf = msm_get_msg_buf(ctrl, 9);
972 buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
973 SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
974 SLIM_MSG_DEST_LOGICALADDR,
975 ea[5] | ea[4] << 8);
976 buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
977 buf[2] = laddr;
978
979 dev->wr_comp = &done;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600980 ret = msm_send_msg_buf(ctrl, buf, 9);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 timeout = wait_for_completion_timeout(&done, HZ);
ehgrace.kim1f6cbba2012-08-03 16:05:34 -0700982 if (!timeout)
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600983 dev->err = -ETIMEDOUT;
984 if (dev->err) {
985 ret = dev->err;
986 dev->err = 0;
ehgrace.kim1f6cbba2012-08-03 16:05:34 -0700987 dev->wr_comp = NULL;
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600988 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989 mutex_unlock(&dev->tx_lock);
Sagar Dhariaf323f8c2012-09-04 11:27:26 -0600990 if (ret) {
991 pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
992 if (retries < INIT_MX_RETRIES) {
993 msm_slim_wait_retry(dev);
994 retries++;
995 goto retry_laddr;
996 } else {
997 pr_err("set LADDR failed after retrying:ret:%d", ret);
998 }
999 }
1000 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001001}
1002
/*
 * Bring the bus out of clock pause: re-enable the controller IRQ and root
 * clock, then kick the framer to resume framing. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	/* reverse order of the clock-pause sequence: irq first, then clock */
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
1023
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001024static int msm_config_port(struct slim_controller *ctrl, u8 pn)
1025{
1026 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
1027 struct msm_slim_endp *endpoint;
1028 int ret = 0;
1029 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
1030 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
1031 return -EPROTONOSUPPORT;
1032 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
1033 return -ENODEV;
1034
1035 endpoint = &dev->pipes[pn];
1036 ret = msm_slim_init_endpoint(dev, endpoint);
1037 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
1038 return ret;
1039}
1040
1041static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
1042 u8 pn, u8 **done_buf, u32 *done_len)
1043{
1044 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
1045 struct sps_iovec sio;
1046 int ret;
1047 if (done_len)
1048 *done_len = 0;
1049 if (done_buf)
1050 *done_buf = NULL;
1051 if (!dev->pipes[pn].connected)
1052 return SLIM_P_DISCONNECT;
1053 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
1054 if (!ret) {
1055 if (done_len)
1056 *done_len = sio.size;
1057 if (done_buf)
1058 *done_buf = (u8 *)sio.addr;
1059 }
1060 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
1061 return SLIM_P_INPROGRESS;
1062}
1063
1064static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
1065 u32 len, struct completion *comp)
1066{
1067 struct sps_register_event sreg;
1068 int ret;
1069 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -06001070 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001071 return -ENODEV;
1072
1073
1074 ctrl->ports[pn].xcomp = comp;
1075 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
1076 sreg.mode = SPS_TRIGGER_WAIT;
1077 sreg.xfer_done = comp;
1078 sreg.callback = NULL;
1079 sreg.user = &ctrl->ports[pn];
1080 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
1081 if (ret) {
1082 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
1083 return ret;
1084 }
1085 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
1086 SPS_IOVEC_FLAG_INT);
1087 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
1088
1089 return ret;
1090}
1091
/*
 * Handle a satellite channel request (@mc is one of SLIM_USR_MC_CHAN_CTRL,
 * SLIM_USR_MC_DEFINE_CHAN, SLIM_USR_MC_DEF_ACT_CHAN) carried in message
 * @buf of length @len, on behalf of satellite @sat.
 *
 * For CHAN_CTRL: applies the requested operation (encoded in buf[3]) to the
 * known channel buf[5] and counts pending remove/define requests for every
 * channel listed from buf[5] onward.
 * For DEFINE/DEF_ACT: builds channel properties from the message fields,
 * (re)allocates channel handles for each channel number from buf[8] onward,
 * defines them as a group, and (for DEF_ACT) activates the group.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* find the first listed channel in the satellite's table */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* bump pending-op counts for every channel listed */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u16 *grph = NULL;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* channel numbers start at buf[8]; need at least one */
		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;
			/* reuse handle if this channel number is known */
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				/* First channel in group from satellite */
				if (i == 8)
					grph = &sat->satch[j].chanh;
				continue;
			}
			/* unknown channel: allocate a new table slot at j */
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			if (i == 8)
				grph = &sat->satch[j].chanh;
			sat->nsatch++;
		}
		/* decode channel properties from message fields */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i > 9 here means more than one channel was listed */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					chh, 1, true, &chh[0]);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;
		else if (grph)
			*grph = chh[0];

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1199
/*
 * Dequeue and dispatch one received SLIMbus message from the controller's
 * RX queue: handles device-present reports (logical address assignment and
 * satellite hand-off), value/information replies, and information reports.
 * Logs anything unexpected.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* header byte 0: 5-bit length, 3-bit message type */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
						false);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/*
			 * Enable runtime PM once the last expected local
			 * device has enumerated (laddr == QC_MSM_DEVS - 1).
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			/* satellite devices get their own workqueue handler */
			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
			if (ret)
				pr_err("assign laddr failed, error:%d", ret);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* route reply payload to the waiting transaction */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reporting device and information element */
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1268
/*
 * Satellite RX work handler: drains the satellite's message queue and
 * services each user-protocol request (capability exchange, address query,
 * channel define/control, reconfiguration, bandwidth, port connect and
 * disconnect). Most requests are answered with a GENERIC_ACK carrying the
 * request's tid and a success/failure code.
 *
 * Runtime-PM votes: a vote is taken (msm_slim_get_ctrl) while a non-core
 * message is being serviced and released when done; satv/chv record whether
 * a vote actually succeeded so only successful votes are released.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i, retries = 0;
		/* common txn defaults; cases below override as needed */
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			/* enumeration address is byte-reversed in buf */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					if (sat->satch[i].reconf) {
						pr_err("SSR, sat:%d, rm ch:%d",
							sat->satcl.laddr,
							sat->satch[i].chan);
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
						slim_dealloc_ch(&sat->satcl,
							sat->satch[i].chanh);
						sat->satch[i].reconf = false;
					}
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* non-core request: vote to keep controller active */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			/* first sighting: register the satellite device */
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			if (ret) {
				pr_err("capability for:0x%x fail:%d, retry:%d",
						sat->satcl.laddr, ret, retries);
				if (retries < INIT_MX_RETRIES) {
					msm_slim_wait_retry(dev);
					retries++;
					goto send_capability;
				} else {
					pr_err("failed after all retries:%d",
							ret);
				}
			} else {
				sat->sent_capability = true;
			}
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* look up laddr for the EA in buf[4..9], reply */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between the two encodings */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* hold a vote until RECONFIG_NOW completes */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* settle pending per-channel remove/define requests */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem && sch->reconf) {
					if (!ret) {
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
						sch->reconf = false;
					}
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					else
						sch->reconf = true;
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* forward as a core connect message to device buf[3] */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_MSG_MC_REPORT_ABSENT:
			dev_info(dev->dev, "Received Report Absent Message\n");
			break;
		default:
			break;
		}
		if (!gen_ack) {
			/* drop the vote taken for non-core requests */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* acknowledge the request: tid + success/failure code */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1512
Sagar Dharia790cfd02011-09-25 17:56:24 -06001513static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1514{
1515 struct msm_slim_sat *sat;
1516 char *name;
1517 if (dev->nsats >= MSM_MAX_NSATS)
1518 return NULL;
1519
1520 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1521 if (!sat) {
1522 dev_err(dev->dev, "no memory for satellite");
1523 return NULL;
1524 }
1525 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1526 if (!name) {
1527 dev_err(dev->dev, "no memory for satellite name");
1528 kfree(sat);
1529 return NULL;
1530 }
1531 dev->satd[dev->nsats] = sat;
1532 sat->dev = dev;
1533 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1534 sat->satcl.name = name;
1535 spin_lock_init(&sat->lock);
1536 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1537 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1538 if (!sat->wq) {
1539 kfree(name);
1540 kfree(sat);
1541 return NULL;
1542 }
1543 /*
1544 * Both sats will be allocated from RX thread and RX thread will
1545 * process messages sequentially. No synchronization necessary
1546 */
1547 dev->nsats++;
1548 return sat;
1549}
1550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551static void
1552msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1553{
1554 u32 *buf = ev->data.transfer.user;
1555 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1556
1557 /*
1558 * Note the virtual address needs to be offset by the same index
1559 * as the physical address or just pass in the actual virtual address
1560 * if the sps_mem_buffer is not needed. Note that if completion is
1561 * used, the virtual address won't be available and will need to be
1562 * calculated based on the offset of the physical address
1563 */
1564 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1565
1566 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1567
1568 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1569 iovec->addr, iovec->size, iovec->flags);
1570
1571 } else {
1572 dev_err(dev->dev, "%s: unknown event %d\n",
1573 __func__, ev->event_id);
1574 }
1575}
1576
1577static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1578{
1579 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1580 msm_slim_rx_msgq_event(dev, notify);
1581}
1582
1583/* Queue up Rx message buffer */
1584static inline int
1585msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1586{
1587 int ret;
1588 u32 flags = SPS_IOVEC_FLAG_INT;
1589 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1590 struct sps_mem_buffer *mem = &endpoint->buf;
1591 struct sps_pipe *pipe = endpoint->sps;
1592
1593 /* Rx message queue buffers are 4 bytes in length */
1594 u8 *virt_addr = mem->base + (4 * ix);
1595 u32 phys_addr = mem->phys_base + (4 * ix);
1596
1597 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1598
1599 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1600 if (ret)
1601 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1602
1603 return ret;
1604}
1605
1606static inline int
1607msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1608{
1609 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1610 struct sps_mem_buffer *mem = &endpoint->buf;
1611 struct sps_pipe *pipe = endpoint->sps;
1612 struct sps_iovec iovec;
1613 int index;
1614 int ret;
1615
1616 ret = sps_get_iovec(pipe, &iovec);
1617 if (ret) {
1618 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1619 goto err_exit;
1620 }
1621
1622 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1623 iovec.addr, iovec.size, iovec.flags);
1624 BUG_ON(iovec.addr < mem->phys_base);
1625 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1626
1627 /* Calculate buffer index */
1628 index = (iovec.addr - mem->phys_base) / 4;
1629 *(data + offset) = *((u32 *)mem->base + index);
1630
1631 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1632
1633 /* Add buffer back to the queue */
1634 (void)msm_slim_post_rx_msgq(dev, index);
1635
1636err_exit:
1637 return ret;
1638}
1639
/*
 * RX message-queue kthread: woken (via dev->rx_msgq_notify) once per
 * received 4-byte word, it reassembles words into a complete message
 * (index/msg_len persist across loop iterations) and then routes the
 * message either to a satellite workqueue or to the generic RX path.
 * In non-msgq mode each wakeup means one full message is already queued.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;		/* next word slot within the current message */
	u8 msg_len = 0;		/* byte length of the message being rebuilt */
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		/* fetch the next completed 4-byte word into buffer[index] */
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* first word carries length, type and message code */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		}
		/* message complete once index*4 bytes cover msg_len */
		if ((index * 4) >= msg_len) {
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1704
1705static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1706{
1707 int i, ret;
1708 u32 pipe_offset;
1709 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1710 struct sps_connect *config = &endpoint->config;
1711 struct sps_mem_buffer *descr = &config->desc;
1712 struct sps_mem_buffer *mem = &endpoint->buf;
1713 struct completion *notify = &dev->rx_msgq_notify;
1714
1715 struct sps_register_event sps_error_event; /* SPS_ERROR */
1716 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1717
Sagar Dharia31ac5812012-01-04 11:38:59 -07001718 init_completion(notify);
1719 if (!dev->use_rx_msgqs)
1720 goto rx_thread_create;
1721
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 /* Allocate the endpoint */
1723 ret = msm_slim_init_endpoint(dev, endpoint);
1724 if (ret) {
1725 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1726 goto sps_init_endpoint_failed;
1727 }
1728
1729 /* Get the pipe indices for the message queues */
1730 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1731 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1732
1733 config->mode = SPS_MODE_SRC;
1734 config->source = dev->bam.hdl;
1735 config->destination = SPS_DEV_HANDLE_MEM;
1736 config->src_pipe_index = pipe_offset;
1737 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1738 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1739
1740 /* Allocate memory for the FIFO descriptors */
1741 ret = msm_slim_sps_mem_alloc(dev, descr,
1742 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1743 if (ret) {
1744 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1745 goto alloc_descr_failed;
1746 }
1747
1748 ret = sps_connect(endpoint->sps, config);
1749 if (ret) {
1750 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1751 goto sps_connect_failed;
1752 }
1753
1754 /* Register completion for DESC_DONE */
1755 init_completion(notify);
1756 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1757
1758 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1759 sps_descr_event.options = SPS_O_DESC_DONE;
1760 sps_descr_event.user = (void *)dev;
1761 sps_descr_event.xfer_done = notify;
1762
1763 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1764 if (ret) {
1765 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1766 goto sps_reg_event_failed;
1767 }
1768
1769 /* Register callback for errors */
1770 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1771 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1772 sps_error_event.options = SPS_O_ERROR;
1773 sps_error_event.user = (void *)dev;
1774 sps_error_event.callback = msm_slim_rx_msgq_cb;
1775
1776 ret = sps_register_event(endpoint->sps, &sps_error_event);
1777 if (ret) {
1778 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1779 goto sps_reg_event_failed;
1780 }
1781
1782 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1783 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1784 if (ret) {
1785 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1786 goto alloc_buffer_failed;
1787 }
1788
1789 /*
1790 * Call transfer_one for each 4-byte buffer
1791 * Use (buf->size/4) - 1 for the number of buffer to post
1792 */
1793
1794 /* Setup the transfer */
1795 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1796 ret = msm_slim_post_rx_msgq(dev, i);
1797 if (ret) {
1798 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1799 goto sps_transfer_failed;
1800 }
1801 }
1802
Sagar Dharia31ac5812012-01-04 11:38:59 -07001803rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 /* Fire up the Rx message queue thread */
1805 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1806 MSM_SLIM_NAME "_rx_msgq_thread");
1807 if (!dev->rx_msgq_thread) {
1808 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001809 /* Tear-down BAMs or return? */
1810 if (!dev->use_rx_msgqs)
1811 return -EIO;
1812 else
1813 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001814 } else
1815 return 0;
1816
1817sps_transfer_failed:
1818 msm_slim_sps_mem_free(dev, mem);
1819alloc_buffer_failed:
1820 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1821 sps_register_event(endpoint->sps, &sps_error_event);
1822sps_reg_event_failed:
1823 sps_disconnect(endpoint->sps);
1824sps_connect_failed:
1825 msm_slim_sps_mem_free(dev, descr);
1826alloc_descr_failed:
1827 msm_slim_free_endpoint(endpoint);
1828sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001829 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 return ret;
1831}
1832
1833/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1834static int __devinit
1835msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1836{
1837 int i, ret;
1838 u32 bam_handle;
1839 struct sps_bam_props bam_props = {0};
1840
1841 static struct sps_bam_sec_config_props sec_props = {
1842 .ees = {
1843 [0] = { /* LPASS */
1844 .vmid = 0,
1845 .pipe_mask = 0xFFFF98,
1846 },
1847 [1] = { /* Krait Apps */
1848 .vmid = 1,
1849 .pipe_mask = 0x3F000007,
1850 },
1851 [2] = { /* Modem */
1852 .vmid = 2,
1853 .pipe_mask = 0x00000060,
1854 },
1855 },
1856 };
1857
1858 bam_props.ee = dev->ee;
1859 bam_props.virt_addr = dev->bam.base;
1860 bam_props.phys_addr = bam_mem->start;
1861 bam_props.irq = dev->bam.irq;
1862 bam_props.manage = SPS_BAM_MGR_LOCAL;
1863 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1864
1865 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1866 bam_props.p_sec_config_props = &sec_props;
1867
1868 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1869 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1870
1871 /* First 7 bits are for message Qs */
1872 for (i = 7; i < 32; i++) {
1873 /* Check what pipes are owned by Apps. */
1874 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1875 break;
1876 }
1877 dev->pipe_b = i - 7;
1878
1879 /* Register the BAM device with the SPS driver */
1880 ret = sps_register_bam_device(&bam_props, &bam_handle);
1881 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001882 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1883 dev->use_rx_msgqs = 0;
1884 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 }
1886 dev->bam.hdl = bam_handle;
1887 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1888
Sagar Dharia31ac5812012-01-04 11:38:59 -07001889init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001890 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001891 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001892 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001893 if (ret && bam_handle) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001894 sps_deregister_bam_device(bam_handle);
1895 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001896 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001897 return ret;
1898}
1899
1900static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1901{
1902 if (dev->use_rx_msgqs) {
1903 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1904 struct sps_connect *config = &endpoint->config;
1905 struct sps_mem_buffer *descr = &config->desc;
1906 struct sps_mem_buffer *mem = &endpoint->buf;
1907 struct sps_register_event sps_event;
1908 memset(&sps_event, 0x00, sizeof(sps_event));
1909 msm_slim_sps_mem_free(dev, mem);
1910 sps_register_event(endpoint->sps, &sps_event);
1911 sps_disconnect(endpoint->sps);
1912 msm_slim_sps_mem_free(dev, descr);
1913 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001914 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001916}
1917
Sagar Dhariacc969452011-09-19 10:34:30 -06001918static void msm_slim_prg_slew(struct platform_device *pdev,
1919 struct msm_slim_ctrl *dev)
1920{
1921 struct resource *slew_io;
1922 void __iomem *slew_reg;
1923 /* SLEW RATE register for this slimbus */
1924 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1925 "slimbus_slew_reg");
1926 if (!dev->slew_mem) {
1927 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1928 return;
1929 }
1930 slew_io = request_mem_region(dev->slew_mem->start,
1931 resource_size(dev->slew_mem), pdev->name);
1932 if (!slew_io) {
1933 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1934 dev->slew_mem = NULL;
1935 return;
1936 }
1937
1938 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1939 if (!slew_reg) {
1940 dev_dbg(dev->dev, "slew register mapping failed");
1941 release_mem_region(dev->slew_mem->start,
1942 resource_size(dev->slew_mem));
1943 dev->slew_mem = NULL;
1944 return;
1945 }
1946 writel_relaxed(1, slew_reg);
1947 /* Make sure slimbus-slew rate enabling goes through */
1948 wmb();
1949 iounmap(slew_reg);
1950}
1951
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001952static int __devinit msm_slim_probe(struct platform_device *pdev)
1953{
1954 struct msm_slim_ctrl *dev;
1955 int ret;
1956 struct resource *bam_mem, *bam_io;
1957 struct resource *slim_mem, *slim_io;
1958 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001959 bool rxreg_access = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001960 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1961 "slimbus_physical");
1962 if (!slim_mem) {
1963 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1964 return -ENODEV;
1965 }
1966 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1967 pdev->name);
1968 if (!slim_io) {
1969 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1970 return -EBUSY;
1971 }
1972
1973 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1974 "slimbus_bam_physical");
1975 if (!bam_mem) {
1976 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1977 ret = -ENODEV;
1978 goto err_get_res_bam_failed;
1979 }
1980 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1981 pdev->name);
1982 if (!bam_io) {
1983 release_mem_region(slim_mem->start, resource_size(slim_mem));
1984 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1985 ret = -EBUSY;
1986 goto err_get_res_bam_failed;
1987 }
1988 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1989 "slimbus_irq");
1990 if (!irq) {
1991 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1992 ret = -ENODEV;
1993 goto err_get_res_failed;
1994 }
1995 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1996 "slimbus_bam_irq");
1997 if (!bam_irq) {
1998 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1999 ret = -ENODEV;
2000 goto err_get_res_failed;
2001 }
2002
2003 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
2004 if (!dev) {
2005 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
2006 ret = -ENOMEM;
2007 goto err_get_res_failed;
2008 }
2009 dev->dev = &pdev->dev;
2010 platform_set_drvdata(pdev, dev);
2011 slim_set_ctrldata(&dev->ctrl, dev);
2012 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
2013 if (!dev->base) {
2014 dev_err(&pdev->dev, "IOremap failed\n");
2015 ret = -ENOMEM;
2016 goto err_ioremap_failed;
2017 }
2018 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
2019 if (!dev->bam.base) {
2020 dev_err(&pdev->dev, "BAM IOremap failed\n");
2021 ret = -ENOMEM;
2022 goto err_ioremap_bam_failed;
2023 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002024 if (pdev->dev.of_node) {
2025
2026 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
2027 &dev->ctrl.nr);
2028 if (ret) {
2029 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
2030 goto err_of_init_failed;
2031 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06002032 rxreg_access = of_property_read_bool(pdev->dev.of_node,
2033 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002034 /* Optional properties */
2035 ret = of_property_read_u32(pdev->dev.of_node,
2036 "qcom,min-clk-gear", &dev->ctrl.min_cg);
2037 ret = of_property_read_u32(pdev->dev.of_node,
2038 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06002039 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
2040 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002041 } else {
2042 dev->ctrl.nr = pdev->id;
2043 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044 dev->ctrl.nchans = MSM_SLIM_NCHANS;
2045 dev->ctrl.nports = MSM_SLIM_NPORTS;
2046 dev->ctrl.set_laddr = msm_set_laddr;
2047 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06002048 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002049 dev->ctrl.config_port = msm_config_port;
2050 dev->ctrl.port_xfer = msm_slim_port_xfer;
2051 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
2052 /* Reserve some messaging BW for satellite-apps driver communication */
2053 dev->ctrl.sched.pending_msgsl = 30;
2054
2055 init_completion(&dev->reconf);
2056 mutex_init(&dev->tx_lock);
2057 spin_lock_init(&dev->rx_lock);
2058 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06002059 if (rxreg_access)
2060 dev->use_rx_msgqs = 0;
2061 else
2062 dev->use_rx_msgqs = 1;
2063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 dev->irq = irq->start;
2065 dev->bam.irq = bam_irq->start;
2066
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002067 dev->hclk = clk_get(dev->dev, "iface_clk");
2068 if (IS_ERR(dev->hclk))
2069 dev->hclk = NULL;
2070 else
2071 clk_prepare_enable(dev->hclk);
2072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002073 ret = msm_slim_sps_init(dev, bam_mem);
2074 if (ret != 0) {
2075 dev_err(dev->dev, "error SPS init\n");
2076 goto err_sps_init_failed;
2077 }
2078
2079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002080 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
2081 dev->framer.superfreq =
2082 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
2083 dev->ctrl.a_framer = &dev->framer;
2084 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002085 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002086 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087
2088 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
2089 "msm_slim_irq", dev);
2090 if (ret) {
2091 dev_err(&pdev->dev, "request IRQ failed\n");
2092 goto err_request_irq_failed;
2093 }
2094
Sagar Dhariacc969452011-09-19 10:34:30 -06002095 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002096
2097 /* Register with framework before enabling frame, clock */
2098 ret = slim_add_numbered_controller(&dev->ctrl);
2099 if (ret) {
2100 dev_err(dev->dev, "error adding controller\n");
2101 goto err_ctrl_failed;
2102 }
2103
2104
Tianyi Gou44a81b02012-02-06 17:49:07 -08002105 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002106 if (!dev->rclk) {
2107 dev_err(dev->dev, "slimbus clock not found");
2108 goto err_clk_get_failed;
2109 }
Sagar Dhariacc969452011-09-19 10:34:30 -06002110 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002111 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002112
Sagar Dharia82e516f2012-03-16 16:01:23 -06002113 dev->ver = readl_relaxed(dev->base);
2114 /* Version info in 16 MSbits */
2115 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002117 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002118 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002119 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002120
2121 /*
2122 * Manager register initialization
2123 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2124 */
2125 if (dev->use_rx_msgqs)
2126 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2127 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2128 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2129 else
2130 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2131 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2132 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2133 writel_relaxed(1, dev->base + MGR_CFG);
2134 /*
2135 * Framer registers are beyond 1K memory region after Manager and/or
2136 * component registers. Make sure those writes are ordered
2137 * before framer register writes
2138 */
2139 wmb();
2140
2141 /* Framer register initialization */
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002142 writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
2143 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002144 dev->base + FRM_CFG);
2145 /*
2146 * Make sure that framer wake-up and enabling writes go through
2147 * before any other component is enabled. Framer is responsible for
2148 * clocking the bus and enabling framer first will ensure that other
2149 * devices can report presence when they are enabled
2150 */
2151 mb();
2152
2153 /* Enable RX msg Q */
2154 if (dev->use_rx_msgqs)
2155 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2156 dev->base + MGR_CFG);
2157 else
2158 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2159 /*
2160 * Make sure that manager-enable is written through before interface
2161 * device is enabled
2162 */
2163 mb();
2164 writel_relaxed(1, dev->base + INTF_CFG);
2165 /*
2166 * Make sure that interface-enable is written through before enabling
2167 * ported generic device inside MSM manager
2168 */
2169 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002170 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2171 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2172 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002173 /*
2174 * Make sure that ported generic device is enabled and port-EE settings
2175 * are written through before finally enabling the component
2176 */
2177 mb();
2178
Sagar Dharia82e516f2012-03-16 16:01:23 -06002179 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180 /*
2181 * Make sure that all writes have gone through before exiting this
2182 * function
2183 */
2184 mb();
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002185 if (pdev->dev.of_node)
2186 of_register_slim_devices(&dev->ctrl);
2187
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002188 pm_runtime_use_autosuspend(&pdev->dev);
2189 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2190 pm_runtime_set_active(&pdev->dev);
2191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002192 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2193 return 0;
2194
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002195err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002196 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002197err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002198 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200 msm_slim_sps_exit(dev);
2201err_sps_init_failed:
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002202 if (dev->hclk) {
2203 clk_disable_unprepare(dev->hclk);
2204 clk_put(dev->hclk);
2205 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002206err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002207 iounmap(dev->bam.base);
2208err_ioremap_bam_failed:
2209 iounmap(dev->base);
2210err_ioremap_failed:
2211 kfree(dev);
2212err_get_res_failed:
2213 release_mem_region(bam_mem->start, resource_size(bam_mem));
2214err_get_res_bam_failed:
2215 release_mem_region(slim_mem->start, resource_size(slim_mem));
2216 return ret;
2217}
2218
/*
 * Platform remove: tears down what probe() set up, largely in reverse
 * order — satellite devices first, then runtime PM, IRQ, the controller,
 * clocks, SPS/BAM, register mappings, and finally the memory regions.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Saved now because kfree(dev) below invalidates dev->slew_mem */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Release each satellite: its channels, device, queue, and name */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	if (dev->hclk)
		clk_put(dev->hclk);
	msm_slim_sps_exit(dev);
	/*
	 * NOTE(review): the RX thread is stopped after its message queues
	 * were torn down above, and the pointer is not IS_ERR-checked —
	 * confirm the thread cannot still be consuming the queues here.
	 */
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	/* Resources are re-looked-up because dev has been freed */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2261
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue an autosuspend request instead of
 * suspending immediately; -EAGAIN tells the PM core not to suspend now.
 */
static int msm_slim_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idle...\n");
	pm_request_autosuspend(dev);
	return -EAGAIN;
}
#endif
2270
/*
 * If PM_RUNTIME is not defined, these two functions act as helpers
 * called from system suspend/resume, so they are guarded by
 * CONFIG_PM_SLEEP rather than CONFIG_PM_RUNTIME.
 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002276#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002277static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002278{
2279 struct platform_device *pdev = to_platform_device(device);
2280 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002281 int ret;
2282 dev_dbg(device, "pm_runtime: suspending...\n");
2283 dev->state = MSM_CTRL_SLEEPING;
2284 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002285 if (ret) {
2286 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002287 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002288 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002289 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002290 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002291 return ret;
2292}
2293
2294static int msm_slim_runtime_resume(struct device *device)
2295{
2296 struct platform_device *pdev = to_platform_device(device);
2297 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2298 int ret = 0;
2299 dev_dbg(device, "pm_runtime: resuming...\n");
2300 if (dev->state == MSM_CTRL_ASLEEP)
2301 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002302 if (ret) {
2303 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002304 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002305 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002306 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002307 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002308 return ret;
2309}
2310
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002311static int msm_slim_suspend(struct device *dev)
2312{
2313 int ret = 0;
2314 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002315 struct platform_device *pdev = to_platform_device(dev);
2316 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002317 dev_dbg(dev, "system suspend");
2318 ret = msm_slim_runtime_suspend(dev);
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002319 if (!ret) {
2320 if (cdev->hclk)
2321 clk_disable_unprepare(cdev->hclk);
2322 }
Sagar Dharia6b559e02011-08-03 17:01:31 -06002323 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002324 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002325 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002326 * If the clock pause failed due to active channels, there is
2327 * a possibility that some audio stream is active during suspend
2328 * We dont want to return suspend failure in that case so that
2329 * display and relevant components can still go to suspend.
2330 * If there is some other error, then it should be passed-on
2331 * to system level suspend
2332 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002333 ret = 0;
2334 }
2335 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002336}
2337
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002338static int msm_slim_resume(struct device *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002339{
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002340 /* If runtime_pm is enabled, this resume shouldn't do anything */
2341 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002342 struct platform_device *pdev = to_platform_device(dev);
2343 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002344 int ret;
2345 dev_dbg(dev, "system resume");
Sagar Dhariad5bb0552012-08-11 15:02:12 -06002346 if (cdev->hclk)
2347 clk_prepare_enable(cdev->hclk);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002348 ret = msm_slim_runtime_resume(dev);
2349 if (!ret) {
2350 pm_runtime_mark_last_busy(dev);
2351 pm_request_autosuspend(dev);
2352 }
2353 return ret;
2354
Sagar Dharia144e5e02011-08-08 17:30:11 -06002355 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002356 return 0;
2357}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002358#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002359
/*
 * PM callbacks: system sleep goes through msm_slim_suspend/resume
 * (which defer to the runtime helpers), runtime PM uses the runtime_*
 * callbacks directly. The macros expand to nothing when the matching
 * CONFIG_PM_* option is disabled.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2371
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002372static struct of_device_id msm_slim_dt_match[] = {
2373 {
2374 .compatible = "qcom,slim-msm",
2375 },
2376 {}
2377};
2378
/* Platform driver glue: probe/remove, PM ops, and DT matching */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2389
2390static int msm_slim_init(void)
2391{
2392 return platform_driver_register(&msm_slim_driver);
2393}
2394subsys_initcall(msm_slim_init);
2395
2396static void msm_slim_exit(void)
2397{
2398 platform_driver_unregister(&msm_slim_driver);
2399}
2400module_exit(msm_slim_exit);
2401
/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");