blob: 624b7ef334672c7f187f236fcbc32d3b7673cc27 [file] [log] [blame]
Sagar Dharia790cfd02011-09-25 17:56:24 -06001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
Sagar Dharia45ee38a2011-08-03 17:01:31 -060024#include <linux/pm_runtime.h>
Sagar Dhariaf8f603b2012-03-21 15:25:17 -060025#include <linux/of.h>
26#include <linux/of_slimbus.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/sps.h>
28
29/* Per spec.max 40 bytes per received message */
30#define SLIM_RX_MSGQ_BUF_LEN 40
31
32#define SLIM_USR_MC_GENERIC_ACK 0x25
33#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
34#define SLIM_USR_MC_REPORT_SATELLITE 0x1
35#define SLIM_USR_MC_ADDR_QUERY 0xD
36#define SLIM_USR_MC_ADDR_REPLY 0xE
37#define SLIM_USR_MC_DEFINE_CHAN 0x20
38#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
39#define SLIM_USR_MC_CHAN_CTRL 0x23
40#define SLIM_USR_MC_RECONFIG_NOW 0x24
41#define SLIM_USR_MC_REQ_BW 0x28
42#define SLIM_USR_MC_CONNECT_SRC 0x2C
43#define SLIM_USR_MC_CONNECT_SINK 0x2D
44#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
45
46/* MSM Slimbus peripheral settings */
47#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
48#define MSM_SLIM_NCHANS 32
49#define MSM_SLIM_NPORTS 24
Sagar Dharia45ee38a2011-08-03 17:01:31 -060050#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051
52/*
53 * Need enough descriptors to receive present messages from slaves
54 * if received simultaneously. Present message needs 3 descriptors
55 * and this size will ensure around 10 simultaneous reports.
56 */
57#define MSM_SLIM_DESC_NUM 32
58
59#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
60 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
61
62#define MSM_SLIM_NAME "msm_slim_ctrl"
63#define SLIM_ROOT_FREQ 24576000
64
65#define MSM_CONCUR_MSG 8
66#define SAT_CONCUR_MSG 8
67#define DEF_WATERMARK (8 << 1)
68#define DEF_ALIGN 0
69#define DEF_PACK (1 << 6)
70#define ENABLE_PORT 1
71
72#define DEF_BLKSZ 0
73#define DEF_TRANSZ 0
74
75#define SAT_MAGIC_LSB 0xD9
76#define SAT_MAGIC_MSB 0xC5
77#define SAT_MSG_VER 0x1
78#define SAT_MSG_PROT 0x1
79#define MSM_SAT_SUCCSS 0x20
Sagar Dharia790cfd02011-09-25 17:56:24 -060080#define MSM_MAX_NSATS 2
Sagar Dharia0ffdca12011-09-25 18:55:53 -060081#define MSM_MAX_SATCH 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082
83#define QC_MFGID_LSB 0x2
84#define QC_MFGID_MSB 0x17
85#define QC_CHIPID_SL 0x10
86#define QC_DEVID_SAT1 0x3
87#define QC_DEVID_SAT2 0x4
88#define QC_DEVID_PGD 0x5
Sagar Dharia45ee38a2011-08-03 17:01:31 -060089#define QC_MSM_DEVS 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070090
Sagar Dharia82e516f2012-03-16 16:01:23 -060091#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
92#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
93#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
94
95#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
96#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
97#define CFG_PORT_V2(r) ((r ## _V2))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098/* Component registers */
Sagar Dharia82e516f2012-03-16 16:01:23 -060099enum comp_reg_v2 {
100 COMP_CFG_V2 = 4,
101 COMP_TRUST_CFG_V2 = 0x3000,
102};
103
104/* Manager PGD registers */
105enum pgd_reg_v2 {
106 PGD_CFG_V2 = 0x800,
107 PGD_STAT_V2 = 0x804,
108 PGD_INT_EN_V2 = 0x810,
109 PGD_INT_STAT_V2 = 0x814,
110 PGD_INT_CLR_V2 = 0x818,
111 PGD_OWN_EEn_V2 = 0x300C,
112 PGD_PORT_INT_EN_EEn_V2 = 0x5000,
113 PGD_PORT_INT_ST_EEn_V2 = 0x5004,
114 PGD_PORT_INT_CL_EEn_V2 = 0x5008,
115 PGD_PORT_CFGn_V2 = 0x14000,
116 PGD_PORT_STATn_V2 = 0x14004,
117 PGD_PORT_PARAMn_V2 = 0x14008,
118 PGD_PORT_BLKn_V2 = 0x1400C,
119 PGD_PORT_TRANn_V2 = 0x14010,
120 PGD_PORT_MCHANn_V2 = 0x14014,
121 PGD_PORT_PSHPLLn_V2 = 0x14018,
122 PGD_PORT_PC_CFGn_V2 = 0x8000,
123 PGD_PORT_PC_VALn_V2 = 0x8004,
124 PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
125 PGD_PORT_PC_VFR_STn_V2 = 0x800C,
126 PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
127 PGD_IE_STAT_V2 = 0x820,
128 PGD_VE_STAT_V2 = 0x830,
129};
130
131#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
132#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
133#define CFG_PORT_V1(r) ((r ## _V1))
134/* Component registers */
135enum comp_reg_v1 {
136 COMP_CFG_V1 = 0,
137 COMP_TRUST_CFG_V1 = 0x14,
138};
139
140/* Manager PGD registers */
141enum pgd_reg_v1 {
142 PGD_CFG_V1 = 0x1000,
143 PGD_STAT_V1 = 0x1004,
144 PGD_INT_EN_V1 = 0x1010,
145 PGD_INT_STAT_V1 = 0x1014,
146 PGD_INT_CLR_V1 = 0x1018,
147 PGD_OWN_EEn_V1 = 0x1020,
148 PGD_PORT_INT_EN_EEn_V1 = 0x1030,
149 PGD_PORT_INT_ST_EEn_V1 = 0x1034,
150 PGD_PORT_INT_CL_EEn_V1 = 0x1038,
151 PGD_PORT_CFGn_V1 = 0x1080,
152 PGD_PORT_STATn_V1 = 0x1084,
153 PGD_PORT_PARAMn_V1 = 0x1088,
154 PGD_PORT_BLKn_V1 = 0x108C,
155 PGD_PORT_TRANn_V1 = 0x1090,
156 PGD_PORT_MCHANn_V1 = 0x1094,
157 PGD_PORT_PSHPLLn_V1 = 0x1098,
158 PGD_PORT_PC_CFGn_V1 = 0x1600,
159 PGD_PORT_PC_VALn_V1 = 0x1604,
160 PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
161 PGD_PORT_PC_VFR_STn_V1 = 0x160C,
162 PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
163 PGD_IE_STAT_V1 = 0x1700,
164 PGD_VE_STAT_V1 = 0x1710,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700165};
166
167/* Manager registers */
168enum mgr_reg {
169 MGR_CFG = 0x200,
170 MGR_STATUS = 0x204,
171 MGR_RX_MSGQ_CFG = 0x208,
172 MGR_INT_EN = 0x210,
173 MGR_INT_STAT = 0x214,
174 MGR_INT_CLR = 0x218,
175 MGR_TX_MSG = 0x230,
176 MGR_RX_MSG = 0x270,
177 MGR_VE_STAT = 0x300,
178};
179
/*
 * MGR_CFG register bits (manager / message-queue enables) -- the
 * register itself is not written in this chunk; verify against the
 * probe/hw-init path.
 */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,
	MGR_CFG_RX_MSGQ_EN = 1 << 1,
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
};
/* Message queue types */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
192/* Framer registers */
193enum frm_reg {
194 FRM_CFG = 0x400,
195 FRM_STAT = 0x404,
196 FRM_INT_EN = 0x410,
197 FRM_INT_STAT = 0x414,
198 FRM_INT_CLR = 0x418,
199 FRM_WAKEUP = 0x41C,
200 FRM_CLKCTL_DONE = 0x420,
201 FRM_IE_STAT = 0x430,
202 FRM_VE_STAT = 0x440,
203};
204
205/* Interface registers */
206enum intf_reg {
207 INTF_CFG = 0x600,
208 INTF_STAT = 0x604,
209 INTF_INT_EN = 0x610,
210 INTF_INT_STAT = 0x614,
211 INTF_INT_CLR = 0x618,
212 INTF_IE_STAT = 0x630,
213 INTF_VE_STAT = 0x640,
214};
215
/* Resource-group / EE ownership encodings -- presumably written to the
 * PGD_OWN_EEn register; not referenced in this chunk, verify at hw-init. */
enum rsc_grp {
	EE_MGR_RSC_GRP	= 1 << 10,
	EE_NGD_2	= 2 << 6,
	EE_NGD_1	= 0,
};

/* Bit positions in MGR_INT_STAT/MGR_INT_EN/MGR_INT_CLR (see the ISR). */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,
	MGR_INT_TX_NACKED_2	= 1 << 25,
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,
	/*
	 * NOTE(review): 1 << 31 shifts into the sign bit of int; kernel
	 * convention would be (1u << 31) -- confirm before changing, as
	 * enum constant types differ between compilers.
	 */
	MGR_INT_TX_MSG_SENT	= 1 << 31,
};

/* FRM_CFG field bit offsets -- register not written in this chunk. */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
};

/* Controller power state consulted by msm_xfer_msg before transmitting. */
enum msm_ctrl_state {
	MSM_CTRL_AWAKE,
	MSM_CTRL_SLEEPING,
	MSM_CTRL_ASLEEP,
};
242
/* State for the BAM (SPS) peripheral that services the SLIMbus ports. */
struct msm_slim_sps_bam {
	u32 hdl;		/* BAM handle, used as SPS pipe source/destination */
	void __iomem *base;	/* mapped BAM register space */
	int irq;		/* BAM interrupt line */
};
248
/* One SPS pipe endpoint and its associated connection state. */
struct msm_slim_endp {
	struct sps_pipe *sps;			/* allocated SPS endpoint */
	struct sps_connect config;		/* pipe connection settings */
	struct sps_register_event event;	/* SPS event registration */
	struct sps_mem_buffer buf;		/* DMA memory backing the pipe */
	struct completion *xcomp;		/* presumably signalled on transfer
						 * completion -- not used in this
						 * chunk, verify in port_xfer path */
	bool connected;				/* true once sps_connect() succeeds */
};
257
/*
 * Driver state for one MSM SLIMbus manager controller instance.
 * Embeds the generic slim_controller and adds register mappings,
 * messaging buffers, SPS/BAM pipe state and power-management flags.
 */
struct msm_slim_ctrl {
	struct slim_controller ctrl;	/* generic slimbus controller core */
	struct slim_framer framer;	/* framer exposed by this controller */
	struct device *dev;		/* platform device (logging, PM, DMA) */
	void __iomem *base;		/* mapped controller register space */
	struct resource *slew_mem;	/* slew-rate registers -- TODO confirm use */
	u32 curr_bw;			/* current bandwidth -- unused in this chunk */
	u8 msg_cnt;			/* message counter -- unused in this chunk */
	u32 tx_buf[10];			/* single TX message assembly buffer */
	u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN]; /* RX ring storage */
	spinlock_t rx_lock;		/* protects head/tail of the RX ring */
	int head;			/* RX ring: next slot to dequeue */
	int tail;			/* RX ring: next slot to enqueue */
	int irq;			/* manager interrupt line */
	int err;			/* status of last TX (-EIO set by ISR on NACK) */
	int ee;				/* execution environment index (reg addressing) */
	struct completion *wr_comp;	/* completion for in-flight TX, if any */
	struct msm_slim_sat *satd[MSM_MAX_NSATS];	/* registered satellites */
	struct msm_slim_endp pipes[7];	/* SPS pipes backing the data ports */
	struct msm_slim_sps_bam bam;	/* BAM servicing the data pipes */
	struct msm_slim_endp rx_msgq;	/* RX message-queue endpoint */
	struct completion rx_msgq_notify; /* signalled by ISR on RX message */
	struct task_struct *rx_msgq_thread; /* RX message consumer thread */
	struct clk *rclk;		/* root clock; gated during clock pause */
	struct mutex tx_lock;		/* serializes message transmission */
	u8 pgdla;			/* logical address used for port connect msgs */
	bool use_rx_msgqs;		/* use BAM msg queues -- TODO confirm semantics */
	int pipe_b;			/* offset between pipes[] index and HW port # */
	struct completion reconf;	/* signalled on RECFG_DONE interrupt */
	bool reconf_busy;		/* reconfiguration sequence outstanding */
	bool chan_active;		/* data channels hold a runtime-PM vote */
	enum msm_ctrl_state state;	/* PM state checked before transfers */
	int nsats;			/* number of valid entries in satd[] */
	u32 ver;			/* HW version: selects V1/V2 register map */
};
293
/*
 * Book-keeping for one data channel owned by a satellite. None of these
 * fields are touched in this chunk; semantics below are inferred from
 * names -- verify against the satellite worker code.
 */
struct msm_sat_chan {
	u8 chan;	/* channel number on the bus */
	u16 chanh;	/* channel handle from the slimbus core */
	int req_rem;	/* pending remove requests -- TODO confirm */
	int req_def;	/* pending define requests -- TODO confirm */
	bool reconf;	/* reconfiguration pending -- TODO confirm */
};
301
/*
 * Per-satellite device state: the satellite's identity on the bus, a
 * ring buffer of messages received for it (filled from the controller
 * ISR) and a workqueue that drains the ring in thread context.
 */
struct msm_slim_sat {
	struct slim_device satcl;	/* satellite's slimbus identity */
	struct msm_slim_ctrl *dev;	/* owning controller */
	struct workqueue_struct *wq;	/* per-satellite work queue */
	struct work_struct wd;		/* work item queued by the ISR */
	u8 sat_msgs[SAT_CONCUR_MSG][40]; /* message ring (40-byte slots) */
	struct msm_sat_chan *satch;	/* channels owned by this satellite */
	u8 nsatch;			/* valid entries in satch[] */
	bool sent_capability;		/* capability msg sent -- TODO confirm */
	bool pending_reconf;		/* reconfig pending -- TODO confirm */
	bool pending_capability;	/* capability pending -- TODO confirm */
	int shead;			/* ring: next slot to dequeue */
	int stail;			/* ring: next slot to enqueue */
	spinlock_t lock;		/* protects shead/stail */
};
317
Sagar Dharia790cfd02011-09-25 17:56:24 -0600318static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700320static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
321{
322 spin_lock(&dev->rx_lock);
323 if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
324 spin_unlock(&dev->rx_lock);
325 dev_err(dev->dev, "RX QUEUE full!");
326 return -EXFULL;
327 }
328 memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
329 dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
330 spin_unlock(&dev->rx_lock);
331 return 0;
332}
333
334static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
335{
336 unsigned long flags;
337 spin_lock_irqsave(&dev->rx_lock, flags);
338 if (dev->tail == dev->head) {
339 spin_unlock_irqrestore(&dev->rx_lock, flags);
340 return -ENODATA;
341 }
342 memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
343 dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
344 spin_unlock_irqrestore(&dev->rx_lock, flags);
345 return 0;
346}
347
348static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
349{
350 struct msm_slim_ctrl *dev = sat->dev;
351 spin_lock(&sat->lock);
352 if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
353 spin_unlock(&sat->lock);
354 dev_err(dev->dev, "SAT QUEUE full!");
355 return -EXFULL;
356 }
357 memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
358 sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
359 spin_unlock(&sat->lock);
360 return 0;
361}
362
363static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
364{
365 unsigned long flags;
366 spin_lock_irqsave(&sat->lock, flags);
367 if (sat->stail == sat->shead) {
368 spin_unlock_irqrestore(&sat->lock, flags);
369 return -ENODATA;
370 }
371 memcpy(buf, sat->sat_msgs[sat->shead], 40);
372 sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
373 spin_unlock_irqrestore(&sat->lock, flags);
374 return 0;
375}
376
377static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
378{
379 e_addr[0] = (buffer[1] >> 24) & 0xff;
380 e_addr[1] = (buffer[1] >> 16) & 0xff;
381 e_addr[2] = (buffer[1] >> 8) & 0xff;
382 e_addr[3] = buffer[1] & 0xff;
383 e_addr[4] = (buffer[0] >> 24) & 0xff;
384 e_addr[5] = (buffer[0] >> 16) & 0xff;
385}
386
387static bool msm_is_sat_dev(u8 *e_addr)
388{
389 if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
390 e_addr[2] != QC_CHIPID_SL &&
391 (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
392 return true;
393 return false;
394}
395
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700396static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600397{
Sagar Dharia45e77912012-01-10 09:55:18 -0700398#ifdef CONFIG_PM_RUNTIME
399 int ref = 0;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -0700400 int ret = pm_runtime_get_sync(dev->dev);
401 if (ret >= 0) {
402 ref = atomic_read(&dev->dev->power.usage_count);
403 if (ref <= 0) {
404 dev_err(dev->dev, "reference count -ve:%d", ref);
405 ret = -ENODEV;
406 }
407 }
408 return ret;
Sagar Dharia45e77912012-01-10 09:55:18 -0700409#else
410 return -ENODEV;
411#endif
Sagar Dharia45ee38a2011-08-03 17:01:31 -0600412}
/*
 * Drop a runtime-PM vote taken via msm_slim_get_ctrl(). Refuses (and
 * complains) when the usage count is already non-positive. No-op when
 * runtime PM is not compiled in.
 */
static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
	int ref;

	pm_runtime_mark_last_busy(dev->dev);
	ref = atomic_read(&dev->dev->power.usage_count);
	if (ref > 0)
		pm_runtime_put(dev->dev);
	else
		dev_err(dev->dev, "reference count mismatch:%d", ref);
#endif
}
425
Sagar Dharia790cfd02011-09-25 17:56:24 -0600426static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
427{
428 struct msm_slim_sat *sat = NULL;
429 int i = 0;
430 while (!sat && i < dev->nsats) {
431 if (laddr == dev->satd[i]->satcl.laddr)
432 sat = dev->satd[i];
433 i++;
434 }
435 return sat;
436}
437
/*
 * Manager interrupt handler. Services, in order:
 *  - TX done / TX NACK: records -EIO in dev->err on NACK and completes
 *    dev->wr_comp so the sender in msm_xfer_msg()/msm_set_laddr() wakes
 *  - RX message received: reads the message out of the MGR_RX_MSG FIFO,
 *    decodes message type (mt) and code (mc), then routes it to the
 *    owning satellite's ring + workqueue, to the controller RX ring
 *    (waking the RX thread via rx_msgq_notify), or just logs it
 *  - reconfiguration done: completes dev->reconf
 *  - per-port interrupts: latches disconnect/overflow/underflow into
 *    the generic port error field and clears the port interrupt
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: let the waiting sender see the failure */
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		if (dev->wr_comp)
			complete(dev->wr_comp);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* Word 0 carries the 5-bit length plus mt/mc fields */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* User messages belong to a satellite, keyed by LA */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Log-only: element code and payload bitmasks */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		int i = 0;
		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
			if (pstat & 1 << i) {
				u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
							i, dev->ver));
				if (val & (1 << 19)) {
					dev->ctrl.ports[i].err =
						SLIM_P_DISCONNECT;
					dev->pipes[i-dev->pipe_b].connected =
						false;
					/*
					 * SPS will call completion since
					 * ERROR flags are registered
					 */
				} else if (val & (1 << 2))
					dev->ctrl.ports[i].err =
							SLIM_P_OVERFLOW;
				else if (val & (1 << 3))
					dev->ctrl.ports[i].err =
							SLIM_P_UNDERFLOW;
			}
			/*
			 * NOTE(review): this writes constant 1 (bit 0) to the
			 * clear register on every loop iteration, not 1 << i
			 * for the port that interrupted -- looks suspicious;
			 * confirm against the HW spec before changing.
			 */
			writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
							dev->ver));
		}
		/*
		 * Guarantee that port interrupt bit(s) clearing writes go
		 * through before exiting ISR
		 */
		mb();
	}

	return IRQ_HANDLED;
}
592
593static int
594msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
595{
596 int ret;
597 struct sps_pipe *endpoint;
598 struct sps_connect *config = &ep->config;
599
600 /* Allocate the endpoint */
601 endpoint = sps_alloc_endpoint();
602 if (!endpoint) {
603 dev_err(dev->dev, "sps_alloc_endpoint failed\n");
604 return -ENOMEM;
605 }
606
607 /* Get default connection configuration for an endpoint */
608 ret = sps_get_config(endpoint, config);
609 if (ret) {
610 dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
611 goto sps_config_failed;
612 }
613
614 ep->sps = endpoint;
615 return 0;
616
617sps_config_failed:
618 sps_free_endpoint(endpoint);
619 return ret;
620}
621
622static void
623msm_slim_free_endpoint(struct msm_slim_endp *ep)
624{
625 sps_free_endpoint(ep->sps);
626 ep->sps = NULL;
627}
628
629static int msm_slim_sps_mem_alloc(
630 struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
631{
632 dma_addr_t phys;
633
634 mem->size = len;
635 mem->min_size = 0;
636 mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
637
638 if (!mem->base) {
639 dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
640 return -ENOMEM;
641 }
642
643 mem->phys_base = phys;
644 memset(mem->base, 0x00, mem->size);
645 return 0;
646}
647
648static void
649msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
650{
651 dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
652 mem->size = 0;
653 mem->base = NULL;
654 mem->phys_base = 0;
655}
656
/*
 * Program default hardware parameters for manager port pn (watermark,
 * alignment, packing, enable bit) and unmask its interrupt in this
 * execution environment's port-interrupt enable register.
 */
static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
	/* read-modify-write: keep the other ports' interrupt enables intact */
	u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));
	writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
	writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
	writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
	writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
				dev->ver));
	/* Make sure that port registers are updated before returning */
	mb();
}
670
/*
 * Connect the SPS pipe backing manager port pn to the BAM: reads the
 * BAM pipe index out of the port status register, fills in the SPS
 * connection (direction depends on the port's flow), reserves
 * descriptor-FIFO space, connects the pipe and finally programs the HW
 * port via msm_hw_set_port(). Returns 0 on success or the SPS error.
 */
static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *endpoint = &dev->pipes[pn];
	struct sps_connect *cfg = &endpoint->config;
	u32 stat;
	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
	if (ret) {
		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
		return ret;
	}
	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

	/* Already-connected pipe: just push the refreshed configuration */
	if (dev->pipes[pn].connected) {
		ret = sps_set_config(dev->pipes[pn].sps, cfg);
		if (ret) {
			dev_err(dev->dev, "sps pipe-port set config erro:%x\n",
				ret);
			return ret;
		}
	}

	/* Bits 4..11 of the port status register carry the BAM pipe number */
	stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b),
				dev->ver));
	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
		/* SLIM_SRC: system memory is the SPS source, BAM consumes */
		cfg->destination = dev->bam.hdl;
		cfg->source = SPS_DEV_HANDLE_MEM;
		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->src_pipe_index = 0;
		dev_dbg(dev->dev, "flow src:pipe num:%d",
					cfg->dest_pipe_index);
		cfg->mode = SPS_MODE_DEST;
	} else {
		/* Otherwise BAM produces and system memory is the SPS sink */
		cfg->source = dev->bam.hdl;
		cfg->destination = SPS_DEV_HANDLE_MEM;
		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
		cfg->dest_pipe_index = 0;
		dev_dbg(dev->dev, "flow dest:pipe num:%d",
					cfg->src_pipe_index);
		cfg->mode = SPS_MODE_SRC;
	}
	/* Space for desciptor FIFOs */
	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
	cfg->config = SPS_CONFIG_DEFAULT;
	ret = sps_connect(dev->pipes[pn].sps, cfg);
	if (!ret) {
		dev->pipes[pn].connected = true;
		msm_hw_set_port(dev, pn + dev->pipe_b);
	}
	return ret;
}
722
723static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
724{
725 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
726 /*
727 * Currently we block a transaction until the current one completes.
728 * In case we need multiple transactions, use message Q
729 */
730 return dev->tx_buf;
731}
732
733static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
734{
735 int i;
736 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
737 for (i = 0; i < (len + 3) >> 2; i++) {
738 dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
739 writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
740 }
741 /* Guarantee that message is sent before returning */
742 mb();
743 return 0;
744}
745
/*
 * Core transmit path for all SLIMbus transactions on this controller.
 * Assembles the message header + payload into the single TX buffer,
 * handles the port connect/disconnect message codes specially (they
 * drive the SPS pipe as a side effect), transmits, and waits up to 1s
 * for the TX-done/NACK interrupt.
 *
 * Runtime-PM choreography:
 *  - a "messaging" vote (msgv) is taken for every non-clock-pause
 *    transaction and dropped on every exit path;
 *  - a "data channel" vote is taken at BEGIN_RECONFIGURATION when
 *    channels are scheduled, and dropped at RECONFIGURE_NOW once no
 *    slots remain in use;
 *  - a successful clock-pause RECONFIGURE_NOW gates the root clock and
 *    disables the manager IRQ (re-enabled in msm_clk_pause_wakeup()).
 *
 * Returns 0 on success, dev->err (ISR-reported TX status), -EBUSY when
 * suspended, -EPROTONOSUPPORT for enumeration-address destinations, or
 * -ETIMEDOUT.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;	/* < 0 means no messaging PM vote was taken */
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_SLEEPING)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* Wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(ctrl, txn->rl);
	dev->wr_comp = NULL;
	dev->err = 0;

	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* la == 0xFF on port messages means "this manager's PGD component" */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* Payload starts at byte 3 for logical, byte 2 for other addressing */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* Value/information-element messages carry a 16-bit element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/* Tear down the SPS pipe; nothing to send on the bus */
			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
			struct sps_register_event sps_event;
			memset(&sps_event, 0, sizeof(sps_event));
			sps_register_event(endpoint->sps, &sps_event);
			sps_disconnect(endpoint->sps);
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 */
			dev->pipes[*puc].connected = false;
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* Translate pipe index to HW port number in the message */
		*(puc) = *(puc) + dev->pipe_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, pbuf, txn->rl);
	timeout = wait_for_completion_timeout(&done, HZ);

	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/*
		 * Clock-pause variant: wait for RECFG_DONE, then gate the
		 * root clock and IRQ (undone by msm_clk_pause_wakeup())
		 */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* Drop the data-channel vote once no slots are used */
			if (dev->ctrl.sched.usedslots == 0 &&
					dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	if (!timeout) {
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x",
				txn->mc, txn->mt);
		/* Detach the stack completion so a late ISR can't touch it */
		dev->wr_comp = NULL;
	}

	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);

	return timeout ? dev->err : -ETIMEDOUT;
}
904
/*
 * Controller callback: assign logical address laddr to the device with
 * 6-byte enumeration address ea (elen is unused here). Builds a 9-byte
 * ASSIGN_LOGICAL_ADDRESS message addressed by enumeration address and
 * waits up to 1s for the TX-done interrupt.
 * Returns dev->err (set by the ISR; 0 on success) when the write
 * completes, -ETIMEDOUT otherwise.
 */
static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
				u8 elen, u8 laddr)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	DECLARE_COMPLETION_ONSTACK(done);
	int timeout;
	u32 *buf;
	mutex_lock(&dev->tx_lock);
	buf = msm_get_msg_buf(ctrl, 9);
	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
					SLIM_MSG_DEST_LOGICALADDR,
					ea[5] | ea[4] << 8);
	/* Remaining 4 bytes of the enumeration address, then the new LA */
	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
	buf[2] = laddr;

	dev->wr_comp = &done;
	msm_send_msg_buf(ctrl, buf, 9);
	timeout = wait_for_completion_timeout(&done, HZ);
	/* On timeout, detach the stack completion before it goes out of scope */
	if (!timeout)
		dev->wr_comp = NULL;
	mutex_unlock(&dev->tx_lock);
	return timeout ? dev->err : -ETIMEDOUT;
}
929
/*
 * Bring the bus out of clock pause: re-enable the manager interrupt
 * and the root clock (both were turned off when the clock-pause
 * reconfiguration completed in msm_xfer_msg()) and poke the framer
 * wakeup register. Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(5000, 5000);
	return 0;
}
950
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951static int msm_config_port(struct slim_controller *ctrl, u8 pn)
952{
953 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
954 struct msm_slim_endp *endpoint;
955 int ret = 0;
956 if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
957 ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
958 return -EPROTONOSUPPORT;
959 if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
960 return -ENODEV;
961
962 endpoint = &dev->pipes[pn];
963 ret = msm_slim_init_endpoint(dev, endpoint);
964 dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
965 return ret;
966}
967
968static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
969 u8 pn, u8 **done_buf, u32 *done_len)
970{
971 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
972 struct sps_iovec sio;
973 int ret;
974 if (done_len)
975 *done_len = 0;
976 if (done_buf)
977 *done_buf = NULL;
978 if (!dev->pipes[pn].connected)
979 return SLIM_P_DISCONNECT;
980 ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
981 if (!ret) {
982 if (done_len)
983 *done_len = sio.size;
984 if (done_buf)
985 *done_buf = (u8 *)sio.addr;
986 }
987 dev_dbg(dev->dev, "get iovec returned %d\n", ret);
988 return SLIM_P_INPROGRESS;
989}
990
991static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
992 u32 len, struct completion *comp)
993{
994 struct sps_register_event sreg;
995 int ret;
996 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
Sagar Dhariae77961f2011-09-27 14:03:50 -0600997 if (pn >= 7)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700998 return -ENODEV;
999
1000
1001 ctrl->ports[pn].xcomp = comp;
1002 sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
1003 sreg.mode = SPS_TRIGGER_WAIT;
1004 sreg.xfer_done = comp;
1005 sreg.callback = NULL;
1006 sreg.user = &ctrl->ports[pn];
1007 ret = sps_register_event(dev->pipes[pn].sps, &sreg);
1008 if (ret) {
1009 dev_dbg(dev->dev, "sps register event error:%x\n", ret);
1010 return ret;
1011 }
1012 ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
1013 SPS_IOVEC_FLAG_INT);
1014 dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
1015
1016 return ret;
1017}
1018
/*
 * Handle a satellite channel request.
 *
 * For SLIM_USR_MC_CHAN_CTRL: look up the channel named in buf[5],
 * apply the requested operation (activate/suspend/remove, encoded in
 * buf[3] bits 7:6), and bump the per-channel pending remove/define
 * counters for every channel listed from buf[5] onward.
 *
 * For DEFINE_CHAN / DEF_ACT_CHAN: parse the channel property fields
 * from buf[3..6], register any channels (listed from buf[8] onward)
 * not already tracked in sat->satch[], define them as a group, and
 * activate the group for DEF_ACT_CHAN.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* Find the tracked satellite channel named in buf[5] */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/*
			 * Count a pending remove/define against every
			 * channel listed in the message (bytes 5..len-1)
			 * so RECONFIG_NOW can settle them later.
			 */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u8 coeff, cc;
		u8 prrate = buf[6];
		/* Need at least one channel byte beyond the 8-byte header */
		if (len <= 8)
			return -EINVAL;
		for (i = 8; i < len; i++) {
			int j = 0;
			/* Reuse the handle if this channel is already known */
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				continue;
			}
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			/* New channel: j == sat->nsatch here, so this fills
			 * the next free satch[] slot. */
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			sat->nsatch++;
		}
		/* Decode channel properties from the message header bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >>5 yields 0/2/4/6;
		 * a 2-bit field would normally use >>6 - confirm against
		 * the slim_ch_auxf enum values.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* More than one channel byte: define them as a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					&chh[0], 1, false, NULL);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;

		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
1118
/*
 * Dequeue and dispatch one received SLIMbus message on the manager path.
 * Handles device-present reports (logical-address assignment, satellite
 * hand-off), value/information replies (forwarded to the slimbus core),
 * and information-element reports; anything else is logged as an error.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		/* Byte 0: 5-bit length + 3-bit message type; byte 1: MC */
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* Enable runtime PM once the last expected MSM
			 * device has enumerated */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);

			/* Satellite device: hand the report to its workqueue */
			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;

				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* Reply payload starts at byte 4; byte 3 is the TID */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* Information-element report: log source and bitmask */
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
				l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
					i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
				mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);

		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
1184
/*
 * Satellite RX work function: drain the satellite's message queue and
 * service each message. Satellites are remote subsystems (e.g. ADSP)
 * that proxy SLIMbus requests through this manager; this handler
 * answers capability/address queries, performs channel define/control/
 * reconfigure operations on their behalf, and sends a GENERIC_ACK for
 * transaction-bearing requests. Runtime-PM votes (msm_slim_get_ctrl/
 * put_ctrl) are balanced per message via 'satv'.
 */
static void slim_sat_rxprocess(struct work_struct *work)
{
	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
	struct msm_slim_ctrl *dev = sat->dev;
	u8 buf[40];

	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
		struct slim_msg_txn txn;
		u8 len, mc, mt;
		u32 bw_sl;
		int ret = 0;
		/* satv >= 0 means this iteration holds a runtime-PM vote */
		int satv = -1;
		bool gen_ack = false;
		u8 tid;
		u8 wbuf[8];
		int i;
		/* Default transaction template; cases override as needed */
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
		txn.ec = 0;
		txn.rbuf = NULL;
		txn.la = sat->satcl.laddr;
		/* satellite handling */
		len = buf[0] & 0x1F;
		mc = buf[1];
		mt = (buf[0] >> 5) & 0x7;

		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* Enumeration address arrives in reverse byte order */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];

			/* Hold a PM vote until the satellite acks capability */
			if (pm_runtime_enabled(dev->dev)) {
				satv = msm_slim_get_ctrl(dev);
				if (satv >= 0)
					sat->pending_capability = true;
			}
			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
			sat->satcl.laddr = laddr;
			/*
			 * Since capability message is already sent, present
			 * message will indicate subsystem hosting this
			 * satellite has restarted.
			 * Remove all active channels of this satellite
			 * when this is detected
			 */
			if (sat->sent_capability) {
				for (i = 0; i < sat->nsatch; i++) {
					if (sat->satch[i].reconf) {
						pr_err("SSR, sat:%d, rm ch:%d",
							laddr,
							sat->satch[i].chan);
						slim_control_ch(&sat->satcl,
							sat->satch[i].chanh,
							SLIM_CH_REMOVE, true);
						sat->satch[i].reconf = false;
					}
				}
			}
		} else if (mt != SLIM_MSG_MT_CORE &&
				mc != SLIM_MSG_MC_REPORT_PRESENT) {
			/* Any other user message: vote while servicing it */
			satv = msm_slim_get_ctrl(dev);
		}
		switch (mc) {
		case SLIM_MSG_MC_REPORT_PRESENT:
			/* Remove runtime_pm vote once satellite acks */
			if (mt != SLIM_MSG_MT_CORE) {
				if (pm_runtime_enabled(dev->dev) &&
					sat->pending_capability) {
					msm_slim_put_ctrl(dev);
					sat->pending_capability = false;
				}
				continue;
			}
			/* send a Manager capability msg */
			if (sat->sent_capability) {
				if (mt == SLIM_MSG_MT_CORE)
					goto send_capability;
				else
					continue;
			}
			ret = slim_add_device(&dev->ctrl, &sat->satcl);
			if (ret) {
				dev_err(dev->dev,
					"Satellite-init failed");
				continue;
			}
			/* Satellite-channels */
			/* NOTE(review): kzalloc result is not checked; a
			 * failed allocation would fault in the channel
			 * cases above - confirm intended. */
			sat->satch = kzalloc(MSM_MAX_SATCH *
					sizeof(struct msm_sat_chan),
					GFP_KERNEL);
send_capability:
			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
			txn.la = sat->satcl.laddr;
			txn.rl = 8;
			wbuf[0] = SAT_MAGIC_LSB;
			wbuf[1] = SAT_MAGIC_MSB;
			wbuf[2] = SAT_MSG_VER;
			wbuf[3] = SAT_MSG_PROT;
			txn.wbuf = wbuf;
			txn.len = 4;
			sat->sent_capability = true;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_ADDR_QUERY:
			/* Look up laddr for the queried enumeration address;
			 * zeroed address bytes in the reply signal failure */
			memcpy(&wbuf[1], &buf[4], 6);
			ret = slim_get_logical_addr(&sat->satcl,
					&wbuf[1], 6, &wbuf[7]);
			if (ret)
				memset(&wbuf[1], 0, 6);
			wbuf[0] = buf[3];
			txn.mc = SLIM_USR_MC_ADDR_REPLY;
			txn.rl = 12;
			txn.len = 8;
			txn.wbuf = wbuf;
			msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DEFINE_CHAN:
		case SLIM_USR_MC_DEF_ACT_CHAN:
		case SLIM_USR_MC_CHAN_CTRL:
			/* TID position differs between CHAN_CTRL and the
			 * define variants */
			if (mc != SLIM_USR_MC_CHAN_CTRL)
				tid = buf[7];
			else
				tid = buf[4];
			gen_ack = true;
			ret = msm_sat_define_ch(sat, buf, len, mc);
			if (ret) {
				dev_err(dev->dev,
					"SAT define_ch returned:%d",
					ret);
			}
			/* Hold an extra PM vote until RECONFIG_NOW lands */
			if (!sat->pending_reconf) {
				int chv = msm_slim_get_ctrl(dev);
				if (chv >= 0)
					sat->pending_reconf = true;
			}
			break;
		case SLIM_USR_MC_RECONFIG_NOW:
			tid = buf[3];
			gen_ack = true;
			ret = slim_reconfigure_now(&sat->satcl);
			/* Settle pending channel removes/defines recorded by
			 * msm_sat_define_ch() against the reconfig result */
			for (i = 0; i < sat->nsatch; i++) {
				struct msm_sat_chan *sch = &sat->satch[i];
				if (sch->req_rem) {
					if (!ret) {
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
						sch->reconf = false;
					}
					sch->req_rem--;
				} else if (sch->req_def) {
					if (ret)
						slim_dealloc_ch(&sat->satcl,
								sch->chanh);
					else
						sch->reconf = true;
					sch->req_def--;
				}
			}
			/* Drop the vote taken when the reconfig was queued */
			if (sat->pending_reconf) {
				msm_slim_put_ctrl(dev);
				sat->pending_reconf = false;
			}
			break;
		case SLIM_USR_MC_REQ_BW:
			/* what we get is in SLOTS */
			bw_sl = (u32)buf[4] << 3 |
						((buf[3] & 0xE0) >> 5);
			sat->satcl.pending_msgsl = bw_sl;
			tid = buf[5];
			gen_ack = true;
			break;
		case SLIM_USR_MC_CONNECT_SRC:
		case SLIM_USR_MC_CONNECT_SINK:
			/* Translate user connect request into a core
			 * connect message to the target device */
			if (mc == SLIM_USR_MC_CONNECT_SRC)
				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
			else
				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
			wbuf[0] = buf[4] & 0x1F;
			wbuf[1] = buf[5];
			tid = buf[6];
			txn.la = buf[3];
			txn.mt = SLIM_MSG_MT_CORE;
			txn.rl = 6;
			txn.len = 2;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			break;
		case SLIM_USR_MC_DISCONNECT_PORT:
			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
			wbuf[0] = buf[4] & 0x1F;
			tid = buf[5];
			txn.la = buf[3];
			txn.rl = 5;
			txn.len = 1;
			txn.mt = SLIM_MSG_MT_CORE;
			txn.wbuf = wbuf;
			gen_ack = true;
			ret = msm_xfer_msg(&dev->ctrl, &txn);
			/* falls through to default (harmless, but a 'break'
			 * here would make the intent explicit) */
		default:
			break;
		}
		if (!gen_ack) {
			/* No ack owed: release this iteration's PM vote
			 * (REPORT_PRESENT votes are released on the ack) */
			if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
				msm_slim_put_ctrl(dev);
			continue;
		}

		/* Send GENERIC_ACK: wbuf[1] nonzero means success */
		wbuf[0] = tid;
		if (!ret)
			wbuf[1] = MSM_SAT_SUCCSS;
		else
			wbuf[1] = 0;
		txn.mc = SLIM_USR_MC_GENERIC_ACK;
		txn.la = sat->satcl.laddr;
		txn.rl = 6;
		txn.len = 2;
		txn.wbuf = wbuf;
		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
		msm_xfer_msg(&dev->ctrl, &txn);
		if (satv >= 0)
			msm_slim_put_ctrl(dev);
	}
}
1412
Sagar Dharia790cfd02011-09-25 17:56:24 -06001413static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
1414{
1415 struct msm_slim_sat *sat;
1416 char *name;
1417 if (dev->nsats >= MSM_MAX_NSATS)
1418 return NULL;
1419
1420 sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
1421 if (!sat) {
1422 dev_err(dev->dev, "no memory for satellite");
1423 return NULL;
1424 }
1425 name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
1426 if (!name) {
1427 dev_err(dev->dev, "no memory for satellite name");
1428 kfree(sat);
1429 return NULL;
1430 }
1431 dev->satd[dev->nsats] = sat;
1432 sat->dev = dev;
1433 snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
1434 sat->satcl.name = name;
1435 spin_lock_init(&sat->lock);
1436 INIT_WORK(&sat->wd, slim_sat_rxprocess);
1437 sat->wq = create_singlethread_workqueue(sat->satcl.name);
1438 if (!sat->wq) {
1439 kfree(name);
1440 kfree(sat);
1441 return NULL;
1442 }
1443 /*
1444 * Both sats will be allocated from RX thread and RX thread will
1445 * process messages sequentially. No synchronization necessary
1446 */
1447 dev->nsats++;
1448 return sat;
1449}
1450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001451static void
1452msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
1453{
1454 u32 *buf = ev->data.transfer.user;
1455 struct sps_iovec *iovec = &ev->data.transfer.iovec;
1456
1457 /*
1458 * Note the virtual address needs to be offset by the same index
1459 * as the physical address or just pass in the actual virtual address
1460 * if the sps_mem_buffer is not needed. Note that if completion is
1461 * used, the virtual address won't be available and will need to be
1462 * calculated based on the offset of the physical address
1463 */
1464 if (ev->event_id == SPS_EVENT_DESC_DONE) {
1465
1466 pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
1467
1468 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1469 iovec->addr, iovec->size, iovec->flags);
1470
1471 } else {
1472 dev_err(dev->dev, "%s: unknown event %d\n",
1473 __func__, ev->event_id);
1474 }
1475}
1476
1477static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
1478{
1479 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
1480 msm_slim_rx_msgq_event(dev, notify);
1481}
1482
1483/* Queue up Rx message buffer */
1484static inline int
1485msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
1486{
1487 int ret;
1488 u32 flags = SPS_IOVEC_FLAG_INT;
1489 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1490 struct sps_mem_buffer *mem = &endpoint->buf;
1491 struct sps_pipe *pipe = endpoint->sps;
1492
1493 /* Rx message queue buffers are 4 bytes in length */
1494 u8 *virt_addr = mem->base + (4 * ix);
1495 u32 phys_addr = mem->phys_base + (4 * ix);
1496
1497 pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
1498
1499 ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
1500 if (ret)
1501 dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
1502
1503 return ret;
1504}
1505
1506static inline int
1507msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
1508{
1509 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1510 struct sps_mem_buffer *mem = &endpoint->buf;
1511 struct sps_pipe *pipe = endpoint->sps;
1512 struct sps_iovec iovec;
1513 int index;
1514 int ret;
1515
1516 ret = sps_get_iovec(pipe, &iovec);
1517 if (ret) {
1518 dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
1519 goto err_exit;
1520 }
1521
1522 pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
1523 iovec.addr, iovec.size, iovec.flags);
1524 BUG_ON(iovec.addr < mem->phys_base);
1525 BUG_ON(iovec.addr >= mem->phys_base + mem->size);
1526
1527 /* Calculate buffer index */
1528 index = (iovec.addr - mem->phys_base) / 4;
1529 *(data + offset) = *((u32 *)mem->base + index);
1530
1531 pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
1532
1533 /* Add buffer back to the queue */
1534 (void)msm_slim_post_rx_msgq(dev, index);
1535
1536err_exit:
1537 return ret;
1538}
1539
/*
 * RX dispatcher thread. Each rx_msgq_notify completion signals one
 * 4-byte word (BAM mode) or one whole message (register mode).
 * In BAM mode the thread reassembles multi-word messages in 'buffer'
 * across loop iterations ('index', 'msg_len', 'sat' persist between
 * completions) and routes the finished message either to the matched
 * satellite's workqueue or to the generic manager RX path.
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;
	u8 msg_len = 0;
	int ret;

	dev_dbg(dev->dev, "rx thread started");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);

		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);

		/* 1 irq notification per message */
		if (!dev->use_rx_msgqs) {
			msm_slim_rxwq(dev);
			continue;
		}

		/* BAM mode: fetch the next 4-byte word of the message */
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}

		pr_debug("message[%d] = 0x%x\n", index, *buffer);

		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: parse header (length, MT, MC) and,
			 * for user messages, resolve the source satellite */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				sat = addr_to_sat(dev, laddr);
			}
		} else if ((index * 4) >= msg_len) {
			/* Message complete: dispatch and reset state */
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}

	return 0;
}
1603
1604static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
1605{
1606 int i, ret;
1607 u32 pipe_offset;
1608 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1609 struct sps_connect *config = &endpoint->config;
1610 struct sps_mem_buffer *descr = &config->desc;
1611 struct sps_mem_buffer *mem = &endpoint->buf;
1612 struct completion *notify = &dev->rx_msgq_notify;
1613
1614 struct sps_register_event sps_error_event; /* SPS_ERROR */
1615 struct sps_register_event sps_descr_event; /* DESCR_DONE */
1616
Sagar Dharia31ac5812012-01-04 11:38:59 -07001617 init_completion(notify);
1618 if (!dev->use_rx_msgqs)
1619 goto rx_thread_create;
1620
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001621 /* Allocate the endpoint */
1622 ret = msm_slim_init_endpoint(dev, endpoint);
1623 if (ret) {
1624 dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
1625 goto sps_init_endpoint_failed;
1626 }
1627
1628 /* Get the pipe indices for the message queues */
1629 pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
1630 dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
1631
1632 config->mode = SPS_MODE_SRC;
1633 config->source = dev->bam.hdl;
1634 config->destination = SPS_DEV_HANDLE_MEM;
1635 config->src_pipe_index = pipe_offset;
1636 config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
1637 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1638
1639 /* Allocate memory for the FIFO descriptors */
1640 ret = msm_slim_sps_mem_alloc(dev, descr,
1641 MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
1642 if (ret) {
1643 dev_err(dev->dev, "unable to allocate SPS descriptors\n");
1644 goto alloc_descr_failed;
1645 }
1646
1647 ret = sps_connect(endpoint->sps, config);
1648 if (ret) {
1649 dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
1650 goto sps_connect_failed;
1651 }
1652
1653 /* Register completion for DESC_DONE */
1654 init_completion(notify);
1655 memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
1656
1657 sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
1658 sps_descr_event.options = SPS_O_DESC_DONE;
1659 sps_descr_event.user = (void *)dev;
1660 sps_descr_event.xfer_done = notify;
1661
1662 ret = sps_register_event(endpoint->sps, &sps_descr_event);
1663 if (ret) {
1664 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1665 goto sps_reg_event_failed;
1666 }
1667
1668 /* Register callback for errors */
1669 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1670 sps_error_event.mode = SPS_TRIGGER_CALLBACK;
1671 sps_error_event.options = SPS_O_ERROR;
1672 sps_error_event.user = (void *)dev;
1673 sps_error_event.callback = msm_slim_rx_msgq_cb;
1674
1675 ret = sps_register_event(endpoint->sps, &sps_error_event);
1676 if (ret) {
1677 dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret);
1678 goto sps_reg_event_failed;
1679 }
1680
1681 /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */
1682 ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
1683 if (ret) {
1684 dev_err(dev->dev, "dma_alloc_coherent failed\n");
1685 goto alloc_buffer_failed;
1686 }
1687
1688 /*
1689 * Call transfer_one for each 4-byte buffer
1690 * Use (buf->size/4) - 1 for the number of buffer to post
1691 */
1692
1693 /* Setup the transfer */
1694 for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
1695 ret = msm_slim_post_rx_msgq(dev, i);
1696 if (ret) {
1697 dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
1698 goto sps_transfer_failed;
1699 }
1700 }
1701
Sagar Dharia31ac5812012-01-04 11:38:59 -07001702rx_thread_create:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 /* Fire up the Rx message queue thread */
1704 dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
1705 MSM_SLIM_NAME "_rx_msgq_thread");
1706 if (!dev->rx_msgq_thread) {
1707 dev_err(dev->dev, "Failed to start Rx message queue thread\n");
Sagar Dharia31ac5812012-01-04 11:38:59 -07001708 /* Tear-down BAMs or return? */
1709 if (!dev->use_rx_msgqs)
1710 return -EIO;
1711 else
1712 ret = -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713 } else
1714 return 0;
1715
1716sps_transfer_failed:
1717 msm_slim_sps_mem_free(dev, mem);
1718alloc_buffer_failed:
1719 memset(&sps_error_event, 0x00, sizeof(sps_error_event));
1720 sps_register_event(endpoint->sps, &sps_error_event);
1721sps_reg_event_failed:
1722 sps_disconnect(endpoint->sps);
1723sps_connect_failed:
1724 msm_slim_sps_mem_free(dev, descr);
1725alloc_descr_failed:
1726 msm_slim_free_endpoint(endpoint);
1727sps_init_endpoint_failed:
Sagar Dharia31ac5812012-01-04 11:38:59 -07001728 dev->use_rx_msgqs = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001729 return ret;
1730}
1731
1732/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
1733static int __devinit
1734msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
1735{
1736 int i, ret;
1737 u32 bam_handle;
1738 struct sps_bam_props bam_props = {0};
1739
1740 static struct sps_bam_sec_config_props sec_props = {
1741 .ees = {
1742 [0] = { /* LPASS */
1743 .vmid = 0,
1744 .pipe_mask = 0xFFFF98,
1745 },
1746 [1] = { /* Krait Apps */
1747 .vmid = 1,
1748 .pipe_mask = 0x3F000007,
1749 },
1750 [2] = { /* Modem */
1751 .vmid = 2,
1752 .pipe_mask = 0x00000060,
1753 },
1754 },
1755 };
1756
Sagar Dharia31ac5812012-01-04 11:38:59 -07001757 if (!dev->use_rx_msgqs)
1758 goto init_rx_msgq;
1759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 bam_props.ee = dev->ee;
1761 bam_props.virt_addr = dev->bam.base;
1762 bam_props.phys_addr = bam_mem->start;
1763 bam_props.irq = dev->bam.irq;
1764 bam_props.manage = SPS_BAM_MGR_LOCAL;
1765 bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
1766
1767 bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
1768 bam_props.p_sec_config_props = &sec_props;
1769
1770 bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
1771 SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
1772
1773 /* First 7 bits are for message Qs */
1774 for (i = 7; i < 32; i++) {
1775 /* Check what pipes are owned by Apps. */
1776 if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
1777 break;
1778 }
1779 dev->pipe_b = i - 7;
1780
1781 /* Register the BAM device with the SPS driver */
1782 ret = sps_register_bam_device(&bam_props, &bam_handle);
1783 if (ret) {
Sagar Dharia31ac5812012-01-04 11:38:59 -07001784 dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
1785 dev->use_rx_msgqs = 0;
1786 goto init_rx_msgq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787 }
1788 dev->bam.hdl = bam_handle;
1789 dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
1790
Sagar Dharia31ac5812012-01-04 11:38:59 -07001791init_rx_msgq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001792 ret = msm_slim_init_rx_msgq(dev);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001793 if (ret)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001795 if (!dev->use_rx_msgqs && bam_handle) {
1796 sps_deregister_bam_device(bam_handle);
1797 dev->bam.hdl = 0L;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001798 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 return ret;
1800}
1801
1802static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
1803{
1804 if (dev->use_rx_msgqs) {
1805 struct msm_slim_endp *endpoint = &dev->rx_msgq;
1806 struct sps_connect *config = &endpoint->config;
1807 struct sps_mem_buffer *descr = &config->desc;
1808 struct sps_mem_buffer *mem = &endpoint->buf;
1809 struct sps_register_event sps_event;
1810 memset(&sps_event, 0x00, sizeof(sps_event));
1811 msm_slim_sps_mem_free(dev, mem);
1812 sps_register_event(endpoint->sps, &sps_event);
1813 sps_disconnect(endpoint->sps);
1814 msm_slim_sps_mem_free(dev, descr);
1815 msm_slim_free_endpoint(endpoint);
Sagar Dharia31ac5812012-01-04 11:38:59 -07001816 sps_deregister_bam_device(dev->bam.hdl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001817 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001818}
1819
Sagar Dhariacc969452011-09-19 10:34:30 -06001820static void msm_slim_prg_slew(struct platform_device *pdev,
1821 struct msm_slim_ctrl *dev)
1822{
1823 struct resource *slew_io;
1824 void __iomem *slew_reg;
1825 /* SLEW RATE register for this slimbus */
1826 dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1827 "slimbus_slew_reg");
1828 if (!dev->slew_mem) {
1829 dev_dbg(&pdev->dev, "no slimbus slew resource\n");
1830 return;
1831 }
1832 slew_io = request_mem_region(dev->slew_mem->start,
1833 resource_size(dev->slew_mem), pdev->name);
1834 if (!slew_io) {
1835 dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
1836 dev->slew_mem = NULL;
1837 return;
1838 }
1839
1840 slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
1841 if (!slew_reg) {
1842 dev_dbg(dev->dev, "slew register mapping failed");
1843 release_mem_region(dev->slew_mem->start,
1844 resource_size(dev->slew_mem));
1845 dev->slew_mem = NULL;
1846 return;
1847 }
1848 writel_relaxed(1, slew_reg);
1849 /* Make sure slimbus-slew rate enabling goes through */
1850 wmb();
1851 iounmap(slew_reg);
1852}
1853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854static int __devinit msm_slim_probe(struct platform_device *pdev)
1855{
1856 struct msm_slim_ctrl *dev;
1857 int ret;
1858 struct resource *bam_mem, *bam_io;
1859 struct resource *slim_mem, *slim_io;
1860 struct resource *irq, *bam_irq;
1861 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1862 "slimbus_physical");
1863 if (!slim_mem) {
1864 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1865 return -ENODEV;
1866 }
1867 slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
1868 pdev->name);
1869 if (!slim_io) {
1870 dev_err(&pdev->dev, "slimbus memory already claimed\n");
1871 return -EBUSY;
1872 }
1873
1874 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1875 "slimbus_bam_physical");
1876 if (!bam_mem) {
1877 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1878 ret = -ENODEV;
1879 goto err_get_res_bam_failed;
1880 }
1881 bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
1882 pdev->name);
1883 if (!bam_io) {
1884 release_mem_region(slim_mem->start, resource_size(slim_mem));
1885 dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
1886 ret = -EBUSY;
1887 goto err_get_res_bam_failed;
1888 }
1889 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1890 "slimbus_irq");
1891 if (!irq) {
1892 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1893 ret = -ENODEV;
1894 goto err_get_res_failed;
1895 }
1896 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1897 "slimbus_bam_irq");
1898 if (!bam_irq) {
1899 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1900 ret = -ENODEV;
1901 goto err_get_res_failed;
1902 }
1903
1904 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1905 if (!dev) {
1906 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1907 ret = -ENOMEM;
1908 goto err_get_res_failed;
1909 }
1910 dev->dev = &pdev->dev;
1911 platform_set_drvdata(pdev, dev);
1912 slim_set_ctrldata(&dev->ctrl, dev);
1913 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1914 if (!dev->base) {
1915 dev_err(&pdev->dev, "IOremap failed\n");
1916 ret = -ENOMEM;
1917 goto err_ioremap_failed;
1918 }
1919 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1920 if (!dev->bam.base) {
1921 dev_err(&pdev->dev, "BAM IOremap failed\n");
1922 ret = -ENOMEM;
1923 goto err_ioremap_bam_failed;
1924 }
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001925 if (pdev->dev.of_node) {
1926
1927 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1928 &dev->ctrl.nr);
1929 if (ret) {
1930 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
1931 goto err_of_init_failed;
1932 }
1933 /* Optional properties */
1934 ret = of_property_read_u32(pdev->dev.of_node,
1935 "qcom,min-clk-gear", &dev->ctrl.min_cg);
1936 ret = of_property_read_u32(pdev->dev.of_node,
1937 "qcom,max-clk-gear", &dev->ctrl.max_cg);
1938 pr_err("min_cg:%d, max_cg:%d, ret:%d", dev->ctrl.min_cg,
1939 dev->ctrl.max_cg, ret);
1940 } else {
1941 dev->ctrl.nr = pdev->id;
1942 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001943 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1944 dev->ctrl.nports = MSM_SLIM_NPORTS;
1945 dev->ctrl.set_laddr = msm_set_laddr;
1946 dev->ctrl.xfer_msg = msm_xfer_msg;
Sagar Dharia144e5e02011-08-08 17:30:11 -06001947 dev->ctrl.wakeup = msm_clk_pause_wakeup;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 dev->ctrl.config_port = msm_config_port;
1949 dev->ctrl.port_xfer = msm_slim_port_xfer;
1950 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1951 /* Reserve some messaging BW for satellite-apps driver communication */
1952 dev->ctrl.sched.pending_msgsl = 30;
1953
1954 init_completion(&dev->reconf);
1955 mutex_init(&dev->tx_lock);
1956 spin_lock_init(&dev->rx_lock);
1957 dev->ee = 1;
1958 dev->use_rx_msgqs = 1;
1959 dev->irq = irq->start;
1960 dev->bam.irq = bam_irq->start;
1961
1962 ret = msm_slim_sps_init(dev, bam_mem);
1963 if (ret != 0) {
1964 dev_err(dev->dev, "error SPS init\n");
1965 goto err_sps_init_failed;
1966 }
1967
1968
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1970 dev->framer.superfreq =
1971 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1972 dev->ctrl.a_framer = &dev->framer;
1973 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001974 dev->ctrl.dev.parent = &pdev->dev;
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06001975 dev->ctrl.dev.of_node = pdev->dev.of_node;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976
1977 ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
1978 "msm_slim_irq", dev);
1979 if (ret) {
1980 dev_err(&pdev->dev, "request IRQ failed\n");
1981 goto err_request_irq_failed;
1982 }
1983
Sagar Dhariacc969452011-09-19 10:34:30 -06001984 msm_slim_prg_slew(pdev, dev);
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001985
1986 /* Register with framework before enabling frame, clock */
1987 ret = slim_add_numbered_controller(&dev->ctrl);
1988 if (ret) {
1989 dev_err(dev->dev, "error adding controller\n");
1990 goto err_ctrl_failed;
1991 }
1992
1993
Tianyi Gou44a81b02012-02-06 17:49:07 -08001994 dev->rclk = clk_get(dev->dev, "core_clk");
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07001995 if (!dev->rclk) {
1996 dev_err(dev->dev, "slimbus clock not found");
1997 goto err_clk_get_failed;
1998 }
Sagar Dhariacc969452011-09-19 10:34:30 -06001999 clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
Sagar Dharia9acf7f42012-03-08 09:45:30 -07002000 clk_prepare_enable(dev->rclk);
Sagar Dhariacc969452011-09-19 10:34:30 -06002001
Sagar Dharia82e516f2012-03-16 16:01:23 -06002002 dev->ver = readl_relaxed(dev->base);
2003 /* Version info in 16 MSbits */
2004 dev->ver >>= 16;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002005 /* Component register initialization */
Sagar Dharia82e516f2012-03-16 16:01:23 -06002006 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002007 writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
Sagar Dharia82e516f2012-03-16 16:01:23 -06002008 dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009
2010 /*
2011 * Manager register initialization
2012 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
2013 */
2014 if (dev->use_rx_msgqs)
2015 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2016 MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
2017 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2018 else
2019 writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
2020 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
2021 MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
2022 writel_relaxed(1, dev->base + MGR_CFG);
2023 /*
2024 * Framer registers are beyond 1K memory region after Manager and/or
2025 * component registers. Make sure those writes are ordered
2026 * before framer register writes
2027 */
2028 wmb();
2029
2030 /* Framer register initialization */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002031 writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
2032 (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
2033 dev->base + FRM_CFG);
2034 /*
2035 * Make sure that framer wake-up and enabling writes go through
2036 * before any other component is enabled. Framer is responsible for
2037 * clocking the bus and enabling framer first will ensure that other
2038 * devices can report presence when they are enabled
2039 */
2040 mb();
2041
2042 /* Enable RX msg Q */
2043 if (dev->use_rx_msgqs)
2044 writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
2045 dev->base + MGR_CFG);
2046 else
2047 writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
2048 /*
2049 * Make sure that manager-enable is written through before interface
2050 * device is enabled
2051 */
2052 mb();
2053 writel_relaxed(1, dev->base + INTF_CFG);
2054 /*
2055 * Make sure that interface-enable is written through before enabling
2056 * ported generic device inside MSM manager
2057 */
2058 mb();
Sagar Dharia82e516f2012-03-16 16:01:23 -06002059 writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
2060 writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
2061 (4 * dev->ee));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002062 /*
2063 * Make sure that ported generic device is enabled and port-EE settings
2064 * are written through before finally enabling the component
2065 */
2066 mb();
2067
Sagar Dharia82e516f2012-03-16 16:01:23 -06002068 writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002069 /*
2070 * Make sure that all writes have gone through before exiting this
2071 * function
2072 */
2073 mb();
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002074 if (pdev->dev.of_node)
2075 of_register_slim_devices(&dev->ctrl);
2076
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002077 pm_runtime_use_autosuspend(&pdev->dev);
2078 pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
2079 pm_runtime_set_active(&pdev->dev);
2080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 dev_dbg(dev->dev, "MSM SB controller is up!\n");
2082 return 0;
2083
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002084err_ctrl_failed:
Sagar Dharia82e516f2012-03-16 16:01:23 -06002085 writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
Sagar Dhariab1c0acf2012-02-06 18:16:58 -07002086err_clk_get_failed:
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002087 kfree(dev->satd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002088err_request_irq_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 msm_slim_sps_exit(dev);
2090err_sps_init_failed:
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002091err_of_init_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092 iounmap(dev->bam.base);
2093err_ioremap_bam_failed:
2094 iounmap(dev->base);
2095err_ioremap_failed:
2096 kfree(dev);
2097err_get_res_failed:
2098 release_mem_region(bam_mem->start, resource_size(bam_mem));
2099err_get_res_bam_failed:
2100 release_mem_region(slim_mem->start, resource_size(slim_mem));
2101 return ret;
2102}
2103
/*
 * msm_slim_remove - unbind the controller; undo everything probe set up.
 *
 * Tears down enumerated satellite devices first (their channels,
 * workqueues and name allocations), then disables runtime PM, frees the
 * IRQ, unregisters the controller, drops the core-clock reference,
 * shuts down the SPS/BAM pipes, stops the RX message-queue thread and
 * finally unmaps and releases the register regions claimed in probe.
 * Always returns 0.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	/* Saved before kfree(dev) below so the region can still be freed */
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Remove every satellite known to this controller */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		/* Release each channel the satellite still holds */
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	msm_slim_sps_exit(dev);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev);
	/*
	 * Regions are looked up again by name and NULL-checked: resources
	 * may legitimately be absent (e.g. the slew region is optional).
	 */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
2144
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002145#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback: queue a delayed autosuspend request and
 * return -EAGAIN so the PM core does not suspend the device right away.
 */
static int msm_slim_runtime_idle(struct device *device)
{
	dev_dbg(device, "pm_runtime: idle...\n");
	pm_request_autosuspend(device);
	return -EAGAIN;
}
2152#endif
2153
2154/*
2155 * If PM_RUNTIME is not defined, these 2 functions become helper
2156 * functions to be called from system suspend/resume. So they are not
2157 * inside ifdef CONFIG_PM_RUNTIME
2158 */
Sagar Dharia45e77912012-01-10 09:55:18 -07002159#ifdef CONFIG_PM_SLEEP
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002160static int msm_slim_runtime_suspend(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002161{
2162 struct platform_device *pdev = to_platform_device(device);
2163 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002164 int ret;
2165 dev_dbg(device, "pm_runtime: suspending...\n");
2166 dev->state = MSM_CTRL_SLEEPING;
2167 ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002168 if (ret) {
2169 dev_err(device, "clk pause not entered:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002170 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002171 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002172 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002173 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002174 return ret;
2175}
2176
2177static int msm_slim_runtime_resume(struct device *device)
2178{
2179 struct platform_device *pdev = to_platform_device(device);
2180 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
2181 int ret = 0;
2182 dev_dbg(device, "pm_runtime: resuming...\n");
2183 if (dev->state == MSM_CTRL_ASLEEP)
2184 ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002185 if (ret) {
2186 dev_err(device, "clk pause not exited:%d", ret);
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002187 dev->state = MSM_CTRL_ASLEEP;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002188 } else {
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002189 dev->state = MSM_CTRL_AWAKE;
Sagar Dhariad3ef30a2011-12-09 14:30:45 -07002190 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002191 return ret;
2192}
2193
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002194static int msm_slim_suspend(struct device *dev)
2195{
2196 int ret = 0;
2197 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2198 dev_dbg(dev, "system suspend");
2199 ret = msm_slim_runtime_suspend(dev);
Sagar Dharia6b559e02011-08-03 17:01:31 -06002200 }
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002201 if (ret == -EBUSY) {
Sagar Dharia144e5e02011-08-08 17:30:11 -06002202 /*
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002203 * If the clock pause failed due to active channels, there is
2204 * a possibility that some audio stream is active during suspend
2205 * We dont want to return suspend failure in that case so that
2206 * display and relevant components can still go to suspend.
2207 * If there is some other error, then it should be passed-on
2208 * to system level suspend
2209 */
Sagar Dharia144e5e02011-08-08 17:30:11 -06002210 ret = 0;
2211 }
2212 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002213}
2214
/*
 * System resume: reuse the runtime-resume path, then re-arm the
 * autosuspend timer so the controller can idle again.
 */
static int msm_slim_resume(struct device *dev)
{
	int rc;

	/* If runtime_pm is enabled, this resume shouldn't do anything */
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	rc = msm_slim_runtime_resume(dev);
	if (rc)
		return rc;
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);
	return 0;
}
Sagar Dharia45ee38a2011-08-03 17:01:31 -06002231#endif /* CONFIG_PM_SLEEP */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232
/*
 * PM operations: system-sleep hooks wrap the runtime handlers so the
 * controller enters/exits clock-pause on both paths.
 * NOTE(review): the runtime handlers are defined under CONFIG_PM_SLEEP
 * while SET_RUNTIME_PM_OPS references them under CONFIG_PM_RUNTIME —
 * a CONFIG_PM_RUNTIME=y, CONFIG_PM_SLEEP=n build would presumably fail
 * to link; confirm the supported config combinations.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
2244
Sagar Dhariaf8f603b2012-03-21 15:25:17 -06002245static struct of_device_id msm_slim_dt_match[] = {
2246 {
2247 .compatible = "qcom,slim-msm",
2248 },
2249 {}
2250};
2251
/* Platform-driver glue: probe/remove entry points, PM ops, DT matching */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver	= {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
2262
/*
 * Module init: register the platform driver. Hooked at subsys_initcall
 * level — presumably so the bus controller is up before dependent
 * device_initcall-level drivers probe; confirm against client drivers.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
2268
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
2274
/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");