blob: 6eb5d60b6d63e327e41334b8401f6567aed19ada [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
28
29/* Per spec.max 40 bytes per received message */
30#define SLIM_RX_MSGQ_BUF_LEN 40
31
32#define SLIM_USR_MC_GENERIC_ACK 0x25
33#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
34#define SLIM_USR_MC_REPORT_SATELLITE 0x1
35#define SLIM_USR_MC_ADDR_QUERY 0xD
36#define SLIM_USR_MC_ADDR_REPLY 0xE
37#define SLIM_USR_MC_DEFINE_CHAN 0x20
38#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
39#define SLIM_USR_MC_CHAN_CTRL 0x23
40#define SLIM_USR_MC_RECONFIG_NOW 0x24
41#define SLIM_USR_MC_REQ_BW 0x28
42#define SLIM_USR_MC_CONNECT_SRC 0x2C
43#define SLIM_USR_MC_CONNECT_SINK 0x2D
44#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
45
46/* MSM Slimbus peripheral settings */
47#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
48#define MSM_SLIM_NCHANS 32
49#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060050#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051
52/*
53 * Need enough descriptors to receive present messages from slaves
54 * if received simultaneously. Present message needs 3 descriptors
55 * and this size will ensure around 10 simultaneous reports.
56 */
57#define MSM_SLIM_DESC_NUM 32
58
59#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
60 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
61
62#define MSM_SLIM_NAME "msm_slim_ctrl"
63#define SLIM_ROOT_FREQ 24576000
64
65#define MSM_CONCUR_MSG 8
66#define SAT_CONCUR_MSG 8
67#define DEF_WATERMARK (8 << 1)
68#define DEF_ALIGN 0
69#define DEF_PACK (1 << 6)
70#define ENABLE_PORT 1
71
72#define DEF_BLKSZ 0
73#define DEF_TRANSZ 0
74
75#define SAT_MAGIC_LSB 0xD9
76#define SAT_MAGIC_MSB 0xC5
77#define SAT_MSG_VER 0x1
78#define SAT_MSG_PROT 0x1
79#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060080#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060081#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082
83#define QC_MFGID_LSB 0x2
84#define QC_MFGID_MSB 0x17
85#define QC_CHIPID_SL 0x10
86#define QC_DEVID_SAT1 0x3
87#define QC_DEVID_SAT2 0x4
88#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060089#define QC_MSM_DEVS 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070090
Sagar Dharia82e516f2012-03-16 16:01:23 -060091#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
92#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
93#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
94
95#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
96#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
97#define CFG_PORT_V2(r) ((r ## _V2))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098/* Component registers */
/* Component register offsets for controller HW version 2 (see CFG_PORT_V2) */
enum comp_reg_v2 {
	COMP_CFG_V2 = 4,
	COMP_TRUST_CFG_V2 = 0x3000,
};
103
104/* Manager PGD registers */
/*
 * Manager Ported Generic Device (PGD) register offsets, HW version 2.
 * The *_EEn entries are per-execution-environment (addressed via
 * PGD_THIS_EE_V2, stride 0x1000) and the *n entries are per-port
 * (addressed via PGD_PORT_V2, stride 0x1000).
 */
enum pgd_reg_v2 {
	PGD_CFG_V2 = 0x800,
	PGD_STAT_V2 = 0x804,
	PGD_INT_EN_V2 = 0x810,
	PGD_INT_STAT_V2 = 0x814,
	PGD_INT_CLR_V2 = 0x818,
	PGD_OWN_EEn_V2 = 0x300C,
	PGD_PORT_INT_EN_EEn_V2 = 0x5000,
	PGD_PORT_INT_ST_EEn_V2 = 0x5004,
	PGD_PORT_INT_CL_EEn_V2 = 0x5008,
	PGD_PORT_CFGn_V2 = 0x14000,
	PGD_PORT_STATn_V2 = 0x14004,
	PGD_PORT_PARAMn_V2 = 0x14008,
	PGD_PORT_BLKn_V2 = 0x1400C,
	PGD_PORT_TRANn_V2 = 0x14010,
	PGD_PORT_MCHANn_V2 = 0x14014,
	PGD_PORT_PSHPLLn_V2 = 0x14018,
	PGD_PORT_PC_CFGn_V2 = 0x8000,
	PGD_PORT_PC_VALn_V2 = 0x8004,
	PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
	PGD_PORT_PC_VFR_STn_V2 = 0x800C,
	PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
	PGD_IE_STAT_V2 = 0x820,
	PGD_VE_STAT_V2 = 0x830,
};
130
131#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
132#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
133#define CFG_PORT_V1(r) ((r ## _V1))
134/* Component registers */
/* Component register offsets for controller HW version 1 (see CFG_PORT_V1) */
enum comp_reg_v1 {
	COMP_CFG_V1 = 0,
	COMP_TRUST_CFG_V1 = 0x14,
};
139
140/* Manager PGD registers */
/*
 * Manager PGD register offsets, HW version 1.
 * *_EEn entries are per-EE (PGD_THIS_EE_V1, stride 16) and *n entries
 * are per-port (PGD_PORT_V1, stride 32).
 */
enum pgd_reg_v1 {
	PGD_CFG_V1 = 0x1000,
	PGD_STAT_V1 = 0x1004,
	PGD_INT_EN_V1 = 0x1010,
	PGD_INT_STAT_V1 = 0x1014,
	PGD_INT_CLR_V1 = 0x1018,
	PGD_OWN_EEn_V1 = 0x1020,
	PGD_PORT_INT_EN_EEn_V1 = 0x1030,
	PGD_PORT_INT_ST_EEn_V1 = 0x1034,
	PGD_PORT_INT_CL_EEn_V1 = 0x1038,
	PGD_PORT_CFGn_V1 = 0x1080,
	PGD_PORT_STATn_V1 = 0x1084,
	PGD_PORT_PARAMn_V1 = 0x1088,
	PGD_PORT_BLKn_V1 = 0x108C,
	PGD_PORT_TRANn_V1 = 0x1090,
	PGD_PORT_MCHANn_V1 = 0x1094,
	PGD_PORT_PSHPLLn_V1 = 0x1098,
	PGD_PORT_PC_CFGn_V1 = 0x1600,
	PGD_PORT_PC_VALn_V1 = 0x1604,
	PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
	PGD_PORT_PC_VFR_STn_V1 = 0x160C,
	PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
	PGD_IE_STAT_V1 = 0x1700,
	PGD_VE_STAT_V1 = 0x1710,
};
166
167/* Manager registers */
/* Manager register offsets (common to HW v1 and v2), relative to dev->base */
enum mgr_reg {
	MGR_CFG = 0x200,
	MGR_STATUS = 0x204,
	MGR_RX_MSGQ_CFG = 0x208,
	MGR_INT_EN = 0x210,
	MGR_INT_STAT = 0x214,
	MGR_INT_CLR = 0x218,
	MGR_TX_MSG = 0x230,	/* TX message FIFO; written word-by-word */
	MGR_RX_MSG = 0x270,	/* RX message FIFO; read word-by-word in ISR */
	MGR_VE_STAT = 0x300,
};
179
/* Bit fields of the MGR_CFG register */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
186/* Message queue types */
/* Message-queue indices: one RX queue, two TX priorities */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
192/* Framer registers */
/* Framer register offsets, relative to dev->base */
enum frm_reg {
	FRM_CFG = 0x400,
	FRM_STAT = 0x404,
	FRM_INT_EN = 0x410,
	FRM_INT_STAT = 0x414,
	FRM_INT_CLR = 0x418,
	FRM_WAKEUP = 0x41C,	/* written to exit clock pause */
	FRM_CLKCTL_DONE = 0x420,
	FRM_IE_STAT = 0x430,
	FRM_VE_STAT = 0x440,
};
204
205/* Interface registers */
/* Interface-device register offsets, relative to dev->base */
enum intf_reg {
	INTF_CFG = 0x600,
	INTF_STAT = 0x604,
	INTF_INT_EN = 0x610,
	INTF_INT_STAT = 0x614,
	INTF_INT_CLR = 0x618,
	INTF_IE_STAT = 0x630,
	INTF_VE_STAT = 0x640,
};
215
/* Resource-group / EE ownership values (presumably written to PGD_OWN_EEn
 * during controller init -- the writer is outside this chunk, confirm) */
enum rsc_grp {
	EE_MGR_RSC_GRP = 1 << 10,
	EE_NGD_2 = 2 << 6,
	EE_NGD_1 = 0,
};
221
/* Bit definitions shared by MGR_INT_EN / MGR_INT_STAT / MGR_INT_CLR */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,
	MGR_INT_TX_NACKED_2 = 1 << 25,
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,
	/*
	 * NOTE(review): 1 << 31 left-shifts into the sign bit of int
	 * (UB in ISO C; accepted as a GCC extension). 1u << 31 would be
	 * strictly conforming.
	 */
	MGR_INT_TX_MSG_SENT = 1 << 31,
};
229
/* Framer configuration constants -- presumably FRM_CFG bit/field positions;
 * not referenced within this chunk, verify against the framer init code */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
};
236
/*
 * Controller power states, consulted in msm_xfer_msg():
 *  AWAKE    - fully operational.
 *  SLEEPING - suspend in progress; only clock-pause-sequence messages
 *             are still accepted.
 *  ASLEEP   - suspended; all transactions are rejected with -EBUSY.
 */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
242
/* The SPS/BAM data-mover instance backing this controller's pipes */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM handle, used as pipe source/destination */
	void __iomem *base;	/* mapped BAM register space */
	int irq;
};
248
/* One SPS pipe endpoint (used for data ports and the RX message queue) */
struct msm_slim_endp {
	struct sps_pipe *sps;		/* allocated by msm_slim_init_endpoint() */
	struct sps_connect config;	/* connection parameters for sps_connect() */
	struct sps_register_event event;
	struct sps_mem_buffer buf;	/* descriptor/data memory for the pipe */
	struct completion *xcomp;	/* caller's transfer-done completion */
	bool connected;			/* true between sps_connect/sps_disconnect */
};
257
/* Per-instance state of the MSM SLIMbus manager driver */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* embedded slim-core controller */
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;		/* mapped manager register space */
	struct resource *slew_mem;
	u32 curr_bw;
	u8 msg_cnt;
	u32 tx_buf[10];			/* single outstanding TX message buffer */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN]; /* RX ring storage */
	spinlock_t rx_lock;		/* protects head/tail/rx_msgs */
	int head;			/* RX ring read index */
	int tail;			/* RX ring write index */
	int irq;
	int err;			/* result of last TX, set by the ISR */
	int ee;				/* this execution environment (reg stride) */
	struct completion *wr_comp;	/* completed by ISR on TX sent/NACK */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];
	struct msm_slim_endp pipes[7];	/* data-port pipes, offset by pipe_b */
	struct msm_slim_sps_bam bam;
	struct msm_slim_endp rx_msgq;
	struct completion rx_msgq_notify;
	struct task_struct *rx_msgq_thread;
	struct clk *rclk;		/* reference clock; gated during clock pause */
	struct mutex tx_lock;		/* serializes message transmission */
	u8 pgdla;			/* logical address of the PGD device */
	bool use_rx_msgqs;
	int pipe_b;			/* HW port number of the first BAM pipe */
	struct completion reconf;	/* signalled on MGR_INT_RECFG_DONE */
	bool reconf_busy;		/* reconfiguration sequence in flight */
	bool chan_active;		/* holds a runtime-PM vote for data channels */
	enum msm_ctrl_state state;	/* PM state, see enum msm_ctrl_state */
	int nsats;			/* valid entries in satd[] */
	u32 ver;			/* HW version; selects v1/v2 register map */
};
293
/* Book-keeping for one data channel owned by a satellite device */
struct msm_sat_chan {
	u8 chan;	/* channel number on the bus */
	u16 chanh;	/* channel handle from the slim core -- confirm against users */
	int req_rem;	/* pending remove request count -- TODO confirm */
	int req_def;	/* pending define request count -- TODO confirm */
};
300
/* State for one satellite device managed by this controller */
struct msm_slim_sat {
	struct slim_device satcl;	/* satellite client; laddr matched in addr_to_sat() */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* drains messages queued by the ISR */
	struct work_struct wd;
	u8 sat_msgs[SAT_CONCUR_MSG][40]; /* ring of received satellite messages */
	struct msm_sat_chan *satch;	/* channels owned by this satellite */
	u8 nsatch;			/* valid entries in satch[] */
	bool sent_capability;
	bool pending_reconf;
	bool pending_capability;
	int shead;			/* ring read index */
	int stail;			/* ring write index */
	spinlock_t lock;		/* protects shead/stail/sat_msgs */
};
316
Sagar Dharia790cfd02011-09-25 17:56:24 -0600317static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
318
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
320{
321 spin_lock(&dev->rx_lock);
322 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
323 spin_unlock(&dev->rx_lock);
324 dev_err(dev->dev, "RX QUEUE full!");
325 return -EXFULL;
326 }
327 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
328 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
329 spin_unlock(&dev->rx_lock);
330 return 0;
331}
332
333static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
334{
335 unsigned long flags;
336 spin_lock_irqsave(&dev->rx_lock, flags);
337 if (dev->tail == dev->head) {
338 spin_unlock_irqrestore(&dev->rx_lock, flags);
339 return -ENODATA;
340 }
341 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
342 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
343 spin_unlock_irqrestore(&dev->rx_lock, flags);
344 return 0;
345}
346
347static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
348{
349 struct msm_slim_ctrl *dev = sat->dev;
350 spin_lock(&sat->lock);
351 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
352 spin_unlock(&sat->lock);
353 dev_err(dev->dev, "SAT QUEUE full!");
354 return -EXFULL;
355 }
356 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
357 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
358 spin_unlock(&sat->lock);
359 return 0;
360}
361
362static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
363{
364 unsigned long flags;
365 spin_lock_irqsave(&sat->lock, flags);
366 if (sat->stail == sat->shead) {
367 spin_unlock_irqrestore(&sat->lock, flags);
368 return -ENODATA;
369 }
370 memcpy(buf, sat->sat_msgs[sat->shead], 40);
371 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
372 spin_unlock_irqrestore(&sat->lock, flags);
373 return 0;
374}
375
376static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
377{
378 e_addr[0] = (buffer[1] >> 24) & 0xff;
379 e_addr[1] = (buffer[1] >> 16) & 0xff;
380 e_addr[2] = (buffer[1] >> 8) & 0xff;
381 e_addr[3] = buffer[1] & 0xff;
382 e_addr[4] = (buffer[0] >> 24) & 0xff;
383 e_addr[5] = (buffer[0] >> 16) & 0xff;
384}
385
386static bool msm_is_sat_dev(u8 *e_addr)
387{
388 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
389 e_addr[2] != QC_CHIPID_SL &&
390 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
391 return true;
392 return false;
393}
394
/*
 * Take a runtime-PM vote keeping the controller active.
 * Returns the pm_runtime_get_sync() result (>= 0 on success), or
 * -ENODEV if the usage count looks corrupted or runtime PM is
 * compiled out. Callers only call msm_slim_put_ctrl() when this
 * returned >= 0.
 */
static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref = 0;
	int ret = pm_runtime_get_sync(dev->dev);
	if (ret >= 0) {
		/* sanity-check the reference we just took */
		ref = atomic_read(&dev->dev->power.usage_count);
		if (ref <= 0) {
			dev_err(dev->dev, "reference count -ve:%d", ref);
			ret = -ENODEV;
		}
	}
	return ret;
#else
	return -ENODEV;
#endif
}
/*
 * Drop a vote taken by msm_slim_get_ctrl().
 * The usage count is checked first so an unbalanced put is reported
 * instead of driving the count negative.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;
	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref <= 0)
		dev_err(dev->dev, "reference count mismatch:%d", ref);
	else
		pm_runtime_put(dev->dev);
#endif
}
424
Sagar Dharia790cfd02011-09-25 17:56:24 -0600425static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
426{
427 struct msm_slim_sat *sat = NULL;
428 int i = 0;
429 while (!sat && i < dev->nsats) {
430 if (laddr == dev->satd[i]->satcl.laddr)
431 sat = dev->satd[i];
432 i++;
433 }
434 return sat;
435}
436
/*
 * Top-level ISR: dispatches TX-done/NACK, RX message, reconfiguration-done
 * and per-port interrupts from a single MGR_INT_STAT read.
 *
 * Ordering is deliberate throughout: every interrupt-clear register write
 * is followed by mb() before the corresponding completion/work is
 * signalled, so the clear is guaranteed to have reached the HW first.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: record the error for the waiting sender */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* first word carries length (5 LSBs), MT and MC fields */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user messages are routed to the owning satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element; no consumer */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* Per-port interrupts: record error cause, then clear */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
591
592static int
593msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
594{
595 int ret;
596 struct sps_pipe *endpoint;
597 struct sps_connect *config = &ep->config;
598
599 /* Allocate the endpoint */
600 endpoint = sps_alloc_endpoint();
601 if (!endpoint) {
602 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
603 return -ENOMEM;
604 }
605
606 /* Get default connection configuration for an endpoint */
607 ret = sps_get_config(endpoint, config);
608 if (ret) {
609 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
610 goto sps_config_failed;
611 }
612
613 ep->sps = endpoint;
614 return 0;
615
616sps_config_failed:
617 sps_free_endpoint(endpoint);
618 return ret;
619}
620
621static void
622msm_slim_free_endpoint(struct msm_slim_endp *ep)
623{
624 sps_free_endpoint(ep->sps);
625 ep->sps = NULL;
626}
627
628static int msm_slim_sps_mem_alloc(
629 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
630{
631 dma_addr_t phys;
632
633 mem->size = len;
634 mem->min_size = 0;
635 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
636
637 if (!mem->base) {
638 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
639 return -ENOMEM;
640 }
641
642 mem->phys_base = phys;
643 memset(mem->base, 0x00, mem->size);
644 return 0;
645}
646
647static void
648msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
649{
650 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
651 mem->size = 0;
652 mem->base = NULL;
653 mem->phys_base = 0;
654}
655
/*
 * Program HW port pn with the default watermark/align/pack configuration,
 * enable it, and unmask its interrupt for this EE. The final mb() ensures
 * all register writes have reached the HW before the caller proceeds.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: preserve other ports' interrupt-enable bits */
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
				dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
669
/*
 * Connect the SPS pipe backing logical port pn to the BAM and enable the
 * corresponding HW port (pn + dev->pipe_b).
 *
 * The BAM pipe index is read back from the port's status register; data
 * direction (SPS_MODE_SRC/DEST) follows the slim core's flow setting for
 * the port. Returns 0 on success or an sps_* error code.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* already-connected pipe: just refresh its options */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
				ret);
			return ret;
		}
	}

	/* bits [11:4] of the port status hold the BAM pipe number */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
					dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for descriptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
721
722static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
723{
724 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
725 /*
726 * Currently we block a transaction until the current one completes.
727 * In case we need multiple transactions, use message Q
728 */
729 return dev->tx_buf;
730}
731
732static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
733{
734 int i;
735 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
736 for (i = 0; i < (len + 3) >> 2; i++) {
737 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
738 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
739 }
740 /* Guarantee that message is sent before returning */
741 mb();
742 return 0;
743}
744
/*
 * Transmit one slim transaction and wait (up to 1s) for the HW ack.
 *
 * Responsibilities layered into this function:
 *  - runtime-PM voting: a "messaging" vote (msgv) is taken for every
 *    non-clock-pause transaction, and a separate long-lived "data
 *    channel" vote (dev->chan_active) is taken/dropped around
 *    BEGIN_RECONFIGURATION / RECONFIGURE_NOW when channels come and go;
 *  - rejecting traffic while suspended (see enum msm_ctrl_state);
 *  - assembling the message header/payload into the TX buffer;
 *  - side effects of CONNECT_SOURCE/SINK/DISCONNECT_PORT addressed to the
 *    PGD (la == 0xFF): the BAM pipe is connected/disconnected locally and
 *    the port number is translated by dev->pipe_b before going on the bus;
 *  - the clock-pause variant of RECONFIGURE_NOW additionally waits for
 *    reconfiguration-done, then gates the clock and IRQ.
 *
 * Returns dev->err (set by the ISR) on ack, -ETIMEDOUT on timeout, or
 * -EBUSY/-EPROTONOSUPPORT for rejected transactions.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* result of the messaging PM vote; < 0 = no vote held */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* only one reconfiguration sequence may be in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		/* enumeration-address destinations are not supported */
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* port connect/disconnect with la 0xFF is really meant for the PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* value/information-element transactions carry a 2-byte EC field */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		 mc == SLIM_MSG_MC_CONNECT_SINK ||
		 mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* translate logical port number to HW port number */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause variant: also wait for reconfig-done, then gate */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
				SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
				SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* last data channel removed: drop the channel vote */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);

	return timeout ? dev->err : -ETIMEDOUT;
}
901
/*
 * Assign logical address laddr to the device with 6-byte enumeration
 * address ea by sending ASSIGN_LOGICAL_ADDRESS (fixed 9-byte message).
 * Waits up to 1s for the TX-done interrupt; returns dev->err on ack or
 * -ETIMEDOUT. elen is unused (always a 6-byte EA on this bus).
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	/* header word: destination addressed by the EA's two LSBs */
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	/* remaining 4 EA bytes, then the new logical address */
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
924
/*
 * msm_clk_pause_wakeup() - bring the controller out of clock-pause
 * @ctrl: slimbus controller to wake up
 *
 * Re-enables the controller IRQ and core clock (both turned off when the
 * bus entered clock pause — assumed; confirm against the suspend path),
 * then rings the framer-wakeup doorbell so the framer restarts the bus
 * clock.
 *
 * Return: always 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	/* Doorbell: tell the framer to wake the bus */
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
945
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946static int msm_config_port(struct slim_controller *ctrl, u8 pn)
947{
948 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
949 struct msm_slim_endp *endpoint;
950 int ret = 0;
951 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
952 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
953 return -EPROTONOSUPPORT;
954 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
955 return -ENODEV;
956
957 endpoint = &dev->pipes[pn];
958 ret = msm_slim_init_endpoint(dev, endpoint);
959 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
960 return ret;
961}
962
963static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
964 u8 pn, u8 **done_buf, u32 *done_len)
965{
966 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
967 struct sps_iovec sio;
968 int ret;
969 if (done_len)
970 *done_len = 0;
971 if (done_buf)
972 *done_buf = NULL;
973 if (!dev->pipes[pn].connected)
974 return SLIM_P_DISCONNECT;
975 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
976 if (!ret) {
977 if (done_len)
978 *done_len = sio.size;
979 if (done_buf)
980 *done_buf = (u8 *)sio.addr;
981 }
982 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
983 return SLIM_P_INPROGRESS;
984}
985
/*
 * msm_slim_port_xfer() - queue a data buffer on a port's BAM pipe
 * @ctrl: slimbus controller
 * @pn: port number; only ports 0..6 are accepted here (presumably the
 *	data pipes available to this EE — confirm against pipe layout in
 *	msm_slim_sps_init, where pipes below 7 are message queues)
 * @iobuf: buffer address handed to sps_transfer_one as the DMA address
 *	(assumed physical/DMA-able — TODO confirm with callers)
 * @len: buffer length in bytes
 * @comp: completion the SPS driver signals when the descriptor finishes
 *
 * Registers a wait-mode DESC_DONE/ERROR event on the pipe before
 * submitting the transfer, so @comp fires once for this descriptor.
 *
 * Return: 0 on successful submission, negative error otherwise.
 */
static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
			u32 len, struct completion *comp)
{
	struct sps_register_event sreg;
	int ret;
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	if (pn >= 7)
		return -ENODEV;


	ctrl->ports[pn].xcomp = comp;
	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
	sreg.mode = SPS_TRIGGER_WAIT;
	sreg.xfer_done = comp;
	sreg.callback = NULL;
	sreg.user = &ctrl->ports[pn];
	/* Event must be registered before the transfer is queued */
	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
	if (ret) {
		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
		return ret;
	}
	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
				SPS_IOVEC_FLAG_INT);
	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);

	return ret;
}
1013
/*
 * msm_sat_define_ch() - service a satellite channel define/control message
 * @sat: satellite that sent the message
 * @buf: raw message payload
 * @len: payload length in bytes
 * @mc: message code (SLIM_USR_MC_CHAN_CTRL, _DEFINE_CHAN or _DEF_ACT_CHAN)
 *
 * For SLIM_USR_MC_CHAN_CTRL, the operation encoded in buf[3] is applied
 * to the already-known channel in buf[5]; since the channels form a
 * group, acting on one covers the rest, and pending remove/define
 * counters are bumped for every channel listed from buf[5] onward.
 *
 * Otherwise the message defines a set of channels listed from buf[8];
 * channel properties are decoded from buf[3..6], the channels are
 * defined (as a group when more than one), and activated for
 * SLIM_USR_MC_DEF_ACT_CHAN.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Locate the controlled channel among this sat's channels */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* Record the pending op for every listed channel so
			 * RECONFIG_NOW can dealloc/keep them appropriately.
			 */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;
			/* Re-use the handle if the channel is already known */
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			/* New channel: query a handle and record it at slot j
			 * (j == nsatch here, the first free slot).
			 */
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Decode channel properties from the message header bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i > 9 means more than one channel listed: define a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1113
/*
 * msm_slim_rxwq() - process one message from the controller RX queue
 * @dev: controller whose RX queue is drained
 *
 * Dequeues a single received message and dispatches on its message
 * type/code: device-present reports trigger logical-address assignment
 * (and satellite bring-up for satellite devices), reply messages are
 * forwarded to the slimbus core, report-information and unknown
 * messages are logged.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Header word: 5-bit length, 3-bit message type, then MC */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Enable runtime PM once the last expected MSM
			 * device has enumerated.
			 */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				/* Hand the report to the satellite worker */
				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			/* 12-bit element code split across buf[3]/buf[4] */
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1179
/*
 * slim_sat_rxprocess() - satellite work function: drain and service
 * messages queued for one satellite
 * @work: embedded work_struct inside struct msm_slim_sat
 *
 * Each dequeued message is decoded and serviced (capability exchange,
 * address query, channel define/control, reconfiguration, bandwidth
 * request, port connect/disconnect).  A runtime-PM vote (satv/chv >= 0
 * means a vote was taken via msm_slim_get_ctrl) is paired with
 * msm_slim_put_ctrl on every exit path.  For most request messages a
 * GENERIC_ACK carrying the request's tid is sent back at the bottom of
 * the loop when gen_ack is set.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* >= 0 once a runtime-PM vote has been taken for this msg */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address is byte-reversed in the msg */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					enum slim_ch_state chs =
						slim_get_ch_state(&sat->satcl,
							sat->satch[i].chanh);
					pr_err("Slim-SSR, sat:%d, rm chan:%d",
						laddr,
						sat->satch[i].chan);
					if (chs == SLIM_CH_ACTIVE)
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Vote to keep the controller active while serviced */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Look up a logical address; zeroed EA means fail */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* tid position differs between define and control */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold a vote until RECONFIG_NOW completes */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Settle pending per-channel remove/define requests:
			 * dealloc removed channels on success, and roll back
			 * defined channels on failure.
			 */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem) {
					if (!ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					sch->req_def--;
				}
			}
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
				((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* fallthrough — default is a no-op */
		default:
			break;
		}
		if (!gen_ack) {
			/* Drop the vote for non-ack'd non-present messages */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Acknowledge the request: wbuf[1] != 0 means success */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1404
Sagar Dharia790cfd02011-09-25 17:56:24 -06001405static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1406{
1407 struct msm_slim_sat *sat;
1408 char *name;
1409 if (dev->nsats >= MSM_MAX_NSATS)
1410 return NULL;
1411
1412 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1413 if (!sat) {
1414 dev_err(dev->dev, "no memory for satellite");
1415 return NULL;
1416 }
1417 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1418 if (!name) {
1419 dev_err(dev->dev, "no memory for satellite name");
1420 kfree(sat);
1421 return NULL;
1422 }
1423 dev->satd[dev->nsats] = sat;
1424 sat->dev = dev;
1425 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1426 sat->satcl.name = name;
1427 spin_lock_init(&sat->lock);
1428 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1429 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1430 if (!sat->wq) {
1431 kfree(name);
1432 kfree(sat);
1433 return NULL;
1434 }
1435 /*
1436 * Both sats will be allocated from RX thread and RX thread will
1437 * process messages sequentially. No synchronization necessary
1438 */
1439 dev->nsats++;
1440 return sat;
1441}
1442
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001443static void
1444msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1445{
1446 u32 *buf = ev->data.transfer.user;
1447 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1448
1449 /*
1450 * Note the virtual address needs to be offset by the same index
1451 * as the physical address or just pass in the actual virtual address
1452 * if the sps_mem_buffer is not needed. Note that if completion is
1453 * used, the virtual address won't be available and will need to be
1454 * calculated based on the offset of the physical address
1455 */
1456 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1457
1458 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1459
1460 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1461 iovec->addr, iovec->size, iovec->flags);
1462
1463 } else {
1464 dev_err(dev->dev, "%s: unknown event %d\n",
1465 __func__, ev->event_id);
1466 }
1467}
1468
1469static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1470{
1471 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1472 msm_slim_rx_msgq_event(dev, notify);
1473}
1474
1475/* Queue up Rx message buffer */
1476static inline int
1477msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1478{
1479 int ret;
1480 u32 flags = SPS_IOVEC_FLAG_INT;
1481 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1482 struct sps_mem_buffer *mem = &endpoint->buf;
1483 struct sps_pipe *pipe = endpoint->sps;
1484
1485 /* Rx message queue buffers are 4 bytes in length */
1486 u8 *virt_addr = mem->base + (4 * ix);
1487 u32 phys_addr = mem->phys_base + (4 * ix);
1488
1489 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1490
1491 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1492 if (ret)
1493 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1494
1495 return ret;
1496}
1497
/*
 * msm_slim_rx_msgq_get() - fetch one completed 4-byte RX message word
 * @dev: controller whose RX message queue is read
 * @data: destination array for the message words
 * @offset: word index within @data to fill
 *
 * Pulls the next completed descriptor from the RX pipe, copies the
 * corresponding word out of the DMA buffer into data[offset], and
 * immediately re-queues that buffer slot.
 *
 * Return: 0 on success, or the sps_get_iovec() error.
 */
static inline int
msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
	struct msm_slim_endp *endpoint = &dev->rx_msgq;
	struct sps_mem_buffer *mem = &endpoint->buf;
	struct sps_pipe *pipe = endpoint->sps;
	struct sps_iovec iovec;
	int index;
	int ret;

	ret = sps_get_iovec(pipe, &iovec);
	if (ret) {
		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
		goto err_exit;
	}

	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
		iovec.addr, iovec.size, iovec.flags);
	/* The completed descriptor must lie within our DMA buffer */
	BUG_ON(iovec.addr < mem->phys_base);
	BUG_ON(iovec.addr >= mem->phys_base + mem->size);

	/* Calculate buffer index (each slot is 4 bytes) */
	index = (iovec.addr - mem->phys_base) / 4;
	*(data + offset) = *((u32 *)mem->base + index);

	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

	/* Add buffer back to the queue */
	(void)msm_slim_post_rx_msgq(dev, index);

err_exit:
	return ret;
}
1531
/*
 * msm_slim_rx_msgq_thread() - kthread that reassembles and dispatches
 * received slimbus messages
 * @data: the owning struct msm_slim_ctrl
 *
 * Woken once per received notification via dev->rx_msgq_notify.  When
 * message queues are disabled, each wakeup simply services the register
 * based RX path (msm_slim_rxwq).  Otherwise, 4-byte words are collected
 * into 'buffer' until a whole message (length from the first word's low
 * 5 bits) has arrived, then the message is routed either to a satellite
 * workqueue or to the generic RX handler based on its message type and
 * destination logical address.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: decode header fields */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		} else if ((index * 4) >= msg_len) {
			/* Message complete: dispatch and reset for the next */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1595
1596static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1597{
1598 int i, ret;
1599 u32 pipe_offset;
1600 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1601 struct sps_connect *config = &endpoint->config;
1602 struct sps_mem_buffer *descr = &config->desc;
1603 struct sps_mem_buffer *mem = &endpoint->buf;
1604 struct completion *notify = &dev->rx_msgq_notify;
1605
1606 struct sps_register_event sps_error_event; /* SPS_ERROR */
1607 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1608
Sagar Dharia31ac5812012-01-04 11:38:59 -07001609 init_completion(notify);
1610 if (!dev->use_rx_msgqs)
1611 goto rx_thread_create;
1612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 /* Allocate the endpoint */
1614 ret = msm_slim_init_endpoint(dev, endpoint);
1615 if (ret) {
1616 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1617 goto sps_init_endpoint_failed;
1618 }
1619
1620 /* Get the pipe indices for the message queues */
1621 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1622 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1623
1624 config->mode = SPS_MODE_SRC;
1625 config->source = dev->bam.hdl;
1626 config->destination = SPS_DEV_HANDLE_MEM;
1627 config->src_pipe_index = pipe_offset;
1628 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1629 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1630
1631 /* Allocate memory for the FIFO descriptors */
1632 ret = msm_slim_sps_mem_alloc(dev, descr,
1633 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1634 if (ret) {
1635 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1636 goto alloc_descr_failed;
1637 }
1638
1639 ret = sps_connect(endpoint->sps, config);
1640 if (ret) {
1641 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1642 goto sps_connect_failed;
1643 }
1644
1645 /* Register completion for DESC_DONE */
1646 init_completion(notify);
1647 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1648
1649 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1650 sps_descr_event.options = SPS_O_DESC_DONE;
1651 sps_descr_event.user = (void *)dev;
1652 sps_descr_event.xfer_done = notify;
1653
1654 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1655 if (ret) {
1656 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1657 goto sps_reg_event_failed;
1658 }
1659
1660 /* Register callback for errors */
1661 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1662 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1663 sps_error_event.options = SPS_O_ERROR;
1664 sps_error_event.user = (void *)dev;
1665 sps_error_event.callback = msm_slim_rx_msgq_cb;
1666
1667 ret = sps_register_event(endpoint->sps, &sps_error_event);
1668 if (ret) {
1669 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1670 goto sps_reg_event_failed;
1671 }
1672
1673 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1674 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1675 if (ret) {
1676 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1677 goto alloc_buffer_failed;
1678 }
1679
1680 /*
1681 * Call transfer_one for each 4-byte buffer
1682 * Use (buf->size/4) - 1 for the number of buffer to post
1683 */
1684
1685 /* Setup the transfer */
1686 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1687 ret = msm_slim_post_rx_msgq(dev, i);
1688 if (ret) {
1689 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1690 goto sps_transfer_failed;
1691 }
1692 }
1693
Sagar Dharia31ac5812012-01-04 11:38:59 -07001694rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001695 /* Fire up the Rx message queue thread */
1696 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1697 MSM_SLIM_NAME "_rx_msgq_thread");
1698 if (!dev->rx_msgq_thread) {
1699 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001700 /* Tear-down BAMs or return? */
1701 if (!dev->use_rx_msgqs)
1702 return -EIO;
1703 else
1704 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705 } else
1706 return 0;
1707
1708sps_transfer_failed:
1709 msm_slim_sps_mem_free(dev, mem);
1710alloc_buffer_failed:
1711 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1712 sps_register_event(endpoint->sps, &sps_error_event);
1713sps_reg_event_failed:
1714 sps_disconnect(endpoint->sps);
1715sps_connect_failed:
1716 msm_slim_sps_mem_free(dev, descr);
1717alloc_descr_failed:
1718 msm_slim_free_endpoint(endpoint);
1719sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001720 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001721 return ret;
1722}
1723
/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
/*
 * msm_slim_sps_init() - register the slimbus BAM with the SPS driver
 * @dev: controller being initialized
 * @bam_mem: platform resource describing the BAM register space
 *
 * Programs per-EE pipe ownership/security, computes dev->pipe_b (index
 * of the first data pipe owned by this EE, relative to pipe 7), then
 * registers the BAM and brings up the RX message queues.  If BAM
 * registration fails, message queues are disabled and the driver falls
 * back to register-based RX.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int __devinit
msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
{
	int i, ret;
	u32 bam_handle;
	struct sps_bam_props bam_props = {0};

	/* Per-execution-environment VMID and pipe-ownership table */
	static struct sps_bam_sec_config_props sec_props = {
		.ees = {
			[0] = {		/* LPASS */
				.vmid = 0,
				.pipe_mask = 0xFFFF98,
			},
			[1] = {		/* Krait Apps */
				.vmid = 1,
				.pipe_mask = 0x3F000007,
			},
			[2] = {		/* Modem */
				.vmid = 2,
				.pipe_mask = 0x00000060,
			},
		},
	};

	bam_props.ee = dev->ee;
	bam_props.virt_addr = dev->bam.base;
	bam_props.phys_addr = bam_mem->start;
	bam_props.irq = dev->bam.irq;
	bam_props.manage = SPS_BAM_MGR_LOCAL;
	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;

	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
	bam_props.p_sec_config_props = &sec_props;

	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* First 7 bits are for message Qs */
	for (i = 7; i < 32; i++) {
		/* Check what pipes are owned by Apps. */
		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
			break;
	}
	dev->pipe_b = i - 7;

	/* Register the BAM device with the SPS driver */
	ret = sps_register_bam_device(&bam_props, &bam_handle);
	if (ret) {
		/* Non-fatal: fall back to register-based RX */
		dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
		dev->use_rx_msgqs = 0;
		goto init_rx_msgq;
	}
	dev->bam.hdl = bam_handle;
	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_rx_msgq:
	ret = msm_slim_init_rx_msgq(dev);
	if (ret)
		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
	/* Undo BAM registration if msgq bring-up failed after it succeeded */
	if (ret && bam_handle) {
		sps_deregister_bam_device(bam_handle);
		dev->bam.hdl = 0L;
	}
	return ret;
}
1790
/*
 * msm_slim_sps_exit() - tear down RX message-queue SPS resources
 * @dev: controller being shut down
 *
 * Reverses msm_slim_sps_init()/msm_slim_init_rx_msgq() when message
 * queues were in use: frees the DMA buffers, clears the registered
 * event (a zeroed sps_register_event removes the callback), disconnects
 * the pipe, frees the descriptor FIFO and endpoint, and finally
 * deregisters the BAM device.  The order mirrors init in reverse.
 */
static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
{
	if (dev->use_rx_msgqs) {
		struct msm_slim_endp *endpoint = &dev->rx_msgq;
		struct sps_connect *config = &endpoint->config;
		struct sps_mem_buffer *descr = &config->desc;
		struct sps_mem_buffer *mem = &endpoint->buf;
		struct sps_register_event sps_event;
		memset(&sps_event, 0x00, sizeof(sps_event));
		msm_slim_sps_mem_free(dev, mem);
		sps_register_event(endpoint->sps, &sps_event);
		sps_disconnect(endpoint->sps);
		msm_slim_sps_mem_free(dev, descr);
		msm_slim_free_endpoint(endpoint);
		sps_deregister_bam_device(dev->bam.hdl);
	}
}
1808
Sagar Dhariacc969452011-09-19 10:34:30 -06001809static void msm_slim_prg_slew(struct platform_device *pdev,
1810 struct msm_slim_ctrl *dev)
1811{
1812 struct resource *slew_io;
1813 void __iomem *slew_reg;
1814 /* SLEW RATE register for this slimbus */
1815 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1816 "slimbus_slew_reg");
1817 if (!dev->slew_mem) {
1818 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1819 return;
1820 }
1821 slew_io = request_mem_region(dev->slew_mem->start,
1822 resource_size(dev->slew_mem), pdev->name);
1823 if (!slew_io) {
1824 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1825 dev->slew_mem = NULL;
1826 return;
1827 }
1828
1829 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1830 if (!slew_reg) {
1831 dev_dbg(dev->dev, "slew register mapping failed");
1832 release_mem_region(dev->slew_mem->start,
1833 resource_size(dev->slew_mem));
1834 dev->slew_mem = NULL;
1835 return;
1836 }
1837 writel_relaxed(1, slew_reg);
1838 /* Make sure slimbus-slew rate enabling goes through */
1839 wmb();
1840 iounmap(slew_reg);
1841}
1842
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001843static int __devinit msm_slim_probe(struct platform_device *pdev)
1844{
1845 struct msm_slim_ctrl *dev;
1846 int ret;
1847 struct resource *bam_mem, *bam_io;
1848 struct resource *slim_mem, *slim_io;
1849 struct resource *irq, *bam_irq;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001850 bool rxreg_access = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001851 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1852 "slimbus_physical");
1853 if (!slim_mem) {
1854 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1855 return -ENODEV;
1856 }
1857 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1858 pdev->name);
1859 if (!slim_io) {
1860 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1861 return -EBUSY;
1862 }
1863
1864 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1865 "slimbus_bam_physical");
1866 if (!bam_mem) {
1867 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1868 ret = -ENODEV;
1869 goto err_get_res_bam_failed;
1870 }
1871 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1872 pdev->name);
1873 if (!bam_io) {
1874 release_mem_region(slim_mem->start, resource_size(slim_mem));
1875 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1876 ret = -EBUSY;
1877 goto err_get_res_bam_failed;
1878 }
1879 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1880 "slimbus_irq");
1881 if (!irq) {
1882 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1883 ret = -ENODEV;
1884 goto err_get_res_failed;
1885 }
1886 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1887 "slimbus_bam_irq");
1888 if (!bam_irq) {
1889 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1890 ret = -ENODEV;
1891 goto err_get_res_failed;
1892 }
1893
1894 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1895 if (!dev) {
1896 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1897 ret = -ENOMEM;
1898 goto err_get_res_failed;
1899 }
1900 dev->dev = &pdev->dev;
1901 platform_set_drvdata(pdev, dev);
1902 slim_set_ctrldata(&dev->ctrl, dev);
1903 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1904 if (!dev->base) {
1905 dev_err(&pdev->dev, "IOremap failed\n");
1906 ret = -ENOMEM;
1907 goto err_ioremap_failed;
1908 }
1909 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1910 if (!dev->bam.base) {
1911 dev_err(&pdev->dev, "BAM IOremap failed\n");
1912 ret = -ENOMEM;
1913 goto err_ioremap_bam_failed;
1914 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001915 if (pdev->dev.of_node) {
1916
1917 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1918 &dev->ctrl.nr);
1919 if (ret) {
1920 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
1921 goto err_of_init_failed;
1922 }
Sagar Dharia1beb2202012-07-31 19:06:21 -06001923 rxreg_access = of_property_read_bool(pdev->dev.of_node,
1924 "qcom,rxreg-access");
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001925 /* Optional properties */
1926 ret = of_property_read_u32(pdev->dev.of_node,
1927 "qcom,min-clk-gear", &dev->ctrl.min_cg);
1928 ret = of_property_read_u32(pdev->dev.of_node,
1929 "qcom,max-clk-gear", &dev->ctrl.max_cg);
Sagar Dharia1beb2202012-07-31 19:06:21 -06001930 pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
1931 dev->ctrl.max_cg, rxreg_access);
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001932 } else {
1933 dev->ctrl.nr = pdev->id;
1934 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001935 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1936 dev->ctrl.nports = MSM_SLIM_NPORTS;
1937 dev->ctrl.set_laddr = msm_set_laddr;
1938 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001939 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940 dev->ctrl.config_port = msm_config_port;
1941 dev->ctrl.port_xfer = msm_slim_port_xfer;
1942 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1943 /* Reserve some messaging BW for satellite-apps driver communication */
1944 dev->ctrl.sched.pending_msgsl = 30;
1945
1946 init_completion(&dev->reconf);
1947 mutex_init(&dev->tx_lock);
1948 spin_lock_init(&dev->rx_lock);
1949 dev->ee = 1;
Sagar Dharia1beb2202012-07-31 19:06:21 -06001950 if (rxreg_access)
1951 dev->use_rx_msgqs = 0;
1952 else
1953 dev->use_rx_msgqs = 1;
1954
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001955 dev->irq = irq->start;
1956 dev->bam.irq = bam_irq->start;
1957
1958 ret = msm_slim_sps_init(dev, bam_mem);
1959 if (ret != 0) {
1960 dev_err(dev->dev, "error SPS init\n");
1961 goto err_sps_init_failed;
1962 }
1963
1964
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001965 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1966 dev->framer.superfreq =
1967 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1968 dev->ctrl.a_framer = &dev->framer;
1969 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001970 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001971 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972
1973 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1974 "msm_slim_irq", dev);
1975 if (ret) {
1976 dev_err(&pdev->dev, "request IRQ failed\n");
1977 goto err_request_irq_failed;
1978 }
1979
Sagar Dhariacc969452011-09-19 10:34:30 -06001980 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001981
1982 /* Register with framework before enabling frame, clock */
1983 ret = slim_add_numbered_controller(&dev->ctrl);
1984 if (ret) {
1985 dev_err(dev->dev, "error adding controller\n");
1986 goto err_ctrl_failed;
1987 }
1988
1989
Tianyi Gou44a81b02012-02-06 17:49:07 -08001990 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001991 if (!dev->rclk) {
1992 dev_err(dev->dev, "slimbus clock not found");
1993 goto err_clk_get_failed;
1994 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001995 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07001996 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06001997
Sagar Dharia82e516f2012-03-16 16:01:23 -06001998 dev->ver = readl_relaxed(dev->base);
1999 /* Version info in 16 MSbits */
2000 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002001 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002002 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002003 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002004 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002005
2006 /*
2007 * Manager register initialization
2008 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2009 */
2010 if (dev->use_rx_msgqs)
2011 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2012 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2013 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2014 else
2015 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2016 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2017 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2018 writel_relaxed(1, dev->base + MGR_CFG);
2019 /*
2020 * Framer registers are beyond 1K memory region after Manager and/or
2021 * component registers. Make sure those writes are ordered
2022 * before framer register writes
2023 */
2024 wmb();
2025
2026 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002027 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
2028 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
2029 dev->base + FRM_CFG);
2030 /*
2031 * Make sure that framer wake-up and enabling writes go through
2032 * before any other component is enabled. Framer is responsible for
2033 * clocking the bus and enabling framer first will ensure that other
2034 * devices can report presence when they are enabled
2035 */
2036 mb();
2037
2038 /* Enable RX msg Q */
2039 if (dev->use_rx_msgqs)
2040 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2041 dev->base + MGR_CFG);
2042 else
2043 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2044 /*
2045 * Make sure that manager-enable is written through before interface
2046 * device is enabled
2047 */
2048 mb();
2049 writel_relaxed(1, dev->base + INTF_CFG);
2050 /*
2051 * Make sure that interface-enable is written through before enabling
2052 * ported generic device inside MSM manager
2053 */
2054 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002055 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2056 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2057 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058 /*
2059 * Make sure that ported generic device is enabled and port-EE settings
2060 * are written through before finally enabling the component
2061 */
2062 mb();
2063
Sagar Dharia82e516f2012-03-16 16:01:23 -06002064 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065 /*
2066 * Make sure that all writes have gone through before exiting this
2067 * function
2068 */
2069 mb();
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002070 if (pdev->dev.of_node)
2071 of_register_slim_devices(&dev->ctrl);
2072
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002073 pm_runtime_use_autosuspend(&pdev->dev);
2074 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2075 pm_runtime_set_active(&pdev->dev);
2076
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002077 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2078 return 0;
2079
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002080err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002081 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002082err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002083 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002085 msm_slim_sps_exit(dev);
2086err_sps_init_failed:
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002087err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002088 iounmap(dev->bam.base);
2089err_ioremap_bam_failed:
2090 iounmap(dev->base);
2091err_ioremap_failed:
2092 kfree(dev);
2093err_get_res_failed:
2094 release_mem_region(bam_mem->start, resource_size(bam_mem));
2095err_get_res_bam_failed:
2096 release_mem_region(slim_mem->start, resource_size(slim_mem));
2097 return ret;
2098}
2099
/*
 * msm_slim_remove() - tear down a controller instance.
 *
 * Unwinds probe in roughly reverse order: satellite devices first, then
 * runtime PM, the IRQ, the slimbus-core registration, the clock reference,
 * SPS/BAM state, register mappings, the controller struct, and finally the
 * claimed memory regions.  Always returns 0.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Saved now because kfree(dev) below invalidates dev->slew_mem */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Per satellite: free its channels, device, workqueue and memory */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	/*
	 * NOTE(review): rclk is put without an explicit
	 * clk_disable_unprepare() here -- presumably the clock is disabled
	 * via the clock-pause path before remove; verify.
	 */
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	/*
	 * NOTE(review): rx_msgq_thread is stopped unconditionally even when
	 * RX message queues were disabled (use_rx_msgqs == 0) -- confirm the
	 * thread is created in both configurations.
	 */
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2140
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue an asynchronous autosuspend request and
 * return -EAGAIN so the PM core does not suspend the device synchronously
 * from this context.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
#endif
2149
2150/*
2151 * If PM_RUNTIME is not defined, these 2 functions become helper
2152 * functions to be called from system suspend/resume. So they are not
2153 * inside ifdef CONFIG_PM_RUNTIME
2154 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002155#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002156static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157{
2158 struct platform_device *pdev = to_platform_device(device);
2159 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002160 int ret;
2161 dev_dbg(device, "pm_runtime: suspending...\n");
2162 dev->state = MSM_CTRL_SLEEPING;
2163 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002164 if (ret) {
2165 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002166 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002167 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002168 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002169 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002170 return ret;
2171}
2172
2173static int msm_slim_runtime_resume(struct device *device)
2174{
2175 struct platform_device *pdev = to_platform_device(device);
2176 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2177 int ret = 0;
2178 dev_dbg(device, "pm_runtime: resuming...\n");
2179 if (dev->state == MSM_CTRL_ASLEEP)
2180 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002181 if (ret) {
2182 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002183 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002184 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002185 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002186 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002187 return ret;
2188}
2189
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002190static int msm_slim_suspend(struct device *dev)
2191{
2192 int ret = 0;
2193 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2194 dev_dbg(dev, "system suspend");
2195 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06002196 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002197 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002198 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002199 * If the clock pause failed due to active channels, there is
2200 * a possibility that some audio stream is active during suspend
2201 * We dont want to return suspend failure in that case so that
2202 * display and relevant components can still go to suspend.
2203 * If there is some other error, then it should be passed-on
2204 * to system level suspend
2205 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002206 ret = 0;
2207 }
2208 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002209}
2210
/*
 * System-resume callback.  When runtime PM owns the controller state this
 * is a no-op; otherwise the runtime-resume handler is invoked and, on
 * success, an autosuspend is queued so the controller can idle again.
 */
static int msm_slim_resume(struct device *dev)
{
	int rc;

	/* If runtime_pm is enabled, this resume shouldn't do anything */
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;
	dev_dbg(dev, "system resume");
	rc = msm_slim_runtime_resume(dev);
	if (!rc) {
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return rc;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002227#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002228
/*
 * PM callback table: system sleep goes through msm_slim_suspend/resume
 * (which delegate to the runtime handlers when appropriate); runtime PM
 * uses the runtime handlers directly.
 * NOTE(review): the runtime suspend/resume handlers are compiled under
 * CONFIG_PM_SLEEP while SET_RUNTIME_PM_OPS references them under
 * CONFIG_PM_RUNTIME -- confirm that a CONFIG_PM_RUNTIME=y,
 * CONFIG_PM_SLEEP=n configuration still builds.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2240
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002241static struct of_device_id msm_slim_dt_match[] = {
2242 {
2243 .compatible = "qcom,slim-msm",
2244 },
2245 {}
2246};
2247
/*
 * Platform driver: matched either by name (legacy board files) or by the
 * device-tree compatible table above.
 */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2258
/* Register the controller driver with the platform bus. */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
/* subsys level: the bus must exist before slimbus client drivers probe */
subsys_initcall(msm_slim_init);
2264
/* Module unload: unregister the platform driver. */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
2270
/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");