/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <mach/ocmem_priv.h>

#define RDM_MAX_ENTRIES 32
#define RDM_MAX_CLIENTS 2

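/*
 * Register map sketch (inferred from the offsets below, not from a
 * datasheet): the BR and DM blocks each expose a table of up to
 * RDM_MAX_ENTRIES entries. Each entry is 0x18 bytes wide and holds an
 * OCMEM offset, a transfer size, a DDR physical address (two words,
 * matching ddr_low/ddr_high in the shadow tables) and a control word;
 * the *_TBL_n_* macros compute the register address of field n.
 */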
/* Data Mover Parameters */
#define DM_BLOCK_128 0x0
#define DM_BLOCK_256 0x1
#define DM_BR_ID_LPASS 0x0
#define DM_BR_ID_GPS 0x1

#define DM_INTR_CLR (0x8)
#define DM_INTR_MASK (0xC)
#define DM_INT_STATUS (0x10)
#define DM_GEN_STATUS (0x14)
#define DM_CLR_OFFSET (0x18)
#define DM_CLR_SIZE (0x1C)
#define DM_CLR_PATTERN (0x20)
#define DM_CLR_TRIGGER (0x24)
#define DM_CTRL (0x1000)
#define DM_TBL_BASE (0x1010)
#define DM_TBL_IDX(x) ((x) * 0x18)
#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
#define DM_TBL_n_offset(x) DM_TBL_n(x)
#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)

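/*
 * Each BR client has a control register selecting the window of table
 * entries (start/end indices) it may use; BR_CLIENT0_MASK and
 * BR_CLIENT1_MASK below appear to encode those windows (client0:
 * entries 0 - 15, client1: entries 16 - 31).
 */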
#define BR_CTRL (0x0)
#define BR_CLIENT_BASE (0x4)
#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
#define BR_STATUS (0x14)
/* 16 entries per client are supported */
/* Use entries 0 - 15 for client0 */
#define BR_CLIENT0_MASK (0x1000)
/* Use entries 16 - 31 for client1 */
#define BR_CLIENT1_MASK (0x2010)

#define BR_TBL_BASE (0x40)
#define BR_TBL_IDX(x) ((x) * 0x18)
#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
#define BR_TBL_n_offset(x) BR_TBL_n(x)
#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)

/* Constants and Shifts */
#define BR_TBL_ENTRY_ENABLE 0x1
#define BR_TBL_START 0x0
#define BR_TBL_END 0x8
#define BR_RW_SHIFT 0x2

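/*
 * Shift positions used to assemble the DM_CTRL word in
 * ocmem_rdm_transfer(): direction in bit 0, block size in bit 1, BR id
 * at bit 4, client id at bit 8, and the first/last table entry indices
 * at bits 16 and 24 (0x10/0x18).
 */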
#define DM_TBL_START 0x10
#define DM_TBL_END 0x18
#define DM_CLIENT_SHIFT 0x8
#define DM_BR_ID_SHIFT 0x4
#define DM_BR_BLK_SHIFT 0x1
#define DM_DIR_SHIFT 0x0

#define DM_DONE 0x1
#define DM_MASK_RESET 0x0
#define DM_INTR_RESET 0x20003
#define DM_CLR_ENABLE 0x1

static void *br_base;
static void *dm_base;

static atomic_t dm_pending;
static wait_queue_head_t dm_wq;

/* Shadow tables for debug purposes */
struct ocmem_br_table {
	unsigned int offset;
	unsigned int size;
	unsigned int ddr_low;
	unsigned int ddr_high;
	unsigned int ctrl;
} br_table[RDM_MAX_ENTRIES];

/* The DM table replicates an entire BR table */
/* Note: there is more than one BR in the system */
struct ocmem_dm_table {
	unsigned int offset;
	unsigned int size;
	unsigned int ddr_low;
	unsigned int ddr_high;
	unsigned int ctrl;
} dm_table[RDM_MAX_ENTRIES];

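/*
 * Up to RDM_MAX_CLIENTS clients share the 32 table entries: the
 * sensors client (OCMEM_SENSORS) is BR client 1 and owns entries
 * 16 - 31; every other client maps to BR client 0 and owns entries
 * 0 - 15.
 */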
static inline int client_ctrl_id(int id)
{
	return (id == OCMEM_SENSORS) ? 1 : 0;
}

static inline int client_slot_start(int id)
{
	return client_ctrl_id(id) * 16;
}

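/*
 * Completion interrupt for both engines: bit 0 of DM_INT_STATUS
 * signals that the data mover finished, bit 1 that the clear engine
 * finished. Either way the interrupt is acked and any waiter on dm_wq
 * is woken; an unexpected status is treated as fatal.
 */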
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
	unsigned status;
	unsigned irq_status;

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	irq_status = ocmem_read(dm_base + DM_INT_STATUS);
	pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status);
	if (irq_status & BIT(0)) {
		pr_debug("Data mover completed\n");
		irq_status &= ~BIT(0);
		ocmem_write(irq_status, dm_base + DM_INTR_CLR);
	} else if (irq_status & BIT(1)) {
		pr_debug("Data clear engine completed\n");
		irq_status &= ~BIT(1);
		ocmem_write(irq_status, dm_base + DM_INTR_CLR);
	} else {
		BUG_ON(1);
	}
	atomic_set(&dm_pending, 0);
	wake_up_interruptible(&dm_wq);
	return IRQ_HANDLED;
}

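/*
 * ocmem_clear() - on non-secure builds, wipe the OCMEM region
 * [start, start + size) using the hardware clear engine; the fill
 * pattern 0x4D4D434F is ASCII "OCMM" in little-endian byte order.
 * When CONFIG_MSM_OCMEM_NONSECURE is not set, the no-op stub below is
 * compiled instead.
 */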
#ifdef CONFIG_MSM_OCMEM_NONSECURE
int ocmem_clear(unsigned long start, unsigned long size)
{
	atomic_set(&dm_pending, 1);
	/* Clear DM mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear DM interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
	/* DM CLR offset */
	ocmem_write(start, dm_base + DM_CLR_OFFSET);
	/* DM CLR size */
	ocmem_write(size, dm_base + DM_CLR_SIZE);
	/* Wipe out memory as "OCMM" */
	ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
	/* The offset, size and pattern for clearing must be set
	 * before triggering the clearing engine
	 */
	mb();
	/* Trigger Data Clear */
	ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);

	wait_event_interruptible(dm_wq,
		atomic_read(&dm_pending) == 0);
	return 0;
}
#else
int ocmem_clear(unsigned long start, unsigned long size)
{
	return 0;
}
#endif

/* Lock during transfers */
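/*
 * ocmem_rdm_transfer() - program one BR and one DM table entry per
 * chunk in @clist, starting at the client's slot window, then kick the
 * data mover via DM_CTRL and sleep until the completion interrupt
 * clears dm_pending. @start is the OCMEM offset at which the transfer
 * begins and @direction selects the transfer direction (written to
 * bit 0 of DM_CTRL). Per the comment above, concurrent transfers are
 * expected to be serialized by the caller; no lock is taken here.
 */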
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
			unsigned long start, int direction)
{
	int num_chunks = clist->num_chunks;
	int slot = client_slot_start(id);
	int table_start = 0;
	int table_end = 0;
	int br_ctrl = 0;
	int br_id = 0;
	int dm_ctrl = 0;
	int i = 0;
	int j = 0;
	int status = 0;
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0) {
		pr_err("RDM transfer failed for client %s (id: %d)\n",
				get_name(id), id);
		return rc;
	}

	for (i = 0, j = slot; i < num_chunks; i++, j++) {
		struct ocmem_chunk *chunk = &clist->chunks[i];
		int sz = chunk->size;
		int paddr = chunk->ddr_paddr;
		int tbl_n_ctrl = 0;

		tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
		if (chunk->ro)
			tbl_n_ctrl |= (1 << BR_RW_SHIFT);

		/* Table Entry n of BR and DM */
		ocmem_write(start, br_base + BR_TBL_n_offset(j));
		ocmem_write(sz, br_base + BR_TBL_n_size(j));
		ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));

		ocmem_write(start, dm_base + DM_TBL_n_offset(j));
		ocmem_write(sz, dm_base + DM_TBL_n_size(j));
		ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));

		start += sz;
	}

	br_id = client_ctrl_id(id);
	table_start = slot;
	table_end = slot + num_chunks - 1;
	br_ctrl |= (table_start << BR_TBL_START);
	br_ctrl |= (table_end << BR_TBL_END);

	ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
	/* Enable BR */
	ocmem_write(0x1, br_base + BR_CTRL);

	/* Compute DM Control Value */
	dm_ctrl |= (table_start << DM_TBL_START);
	dm_ctrl |= (table_end << DM_TBL_END);

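	/*
	 * Note: the BR id field is hard-coded to DM_BR_ID_LPASS here
	 * rather than derived from br_id, and the client field
	 * (DM_CLIENT_SHIFT) is left at zero.
	 */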
	dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
	dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
	dm_ctrl |= (direction << DM_DIR_SHIFT);

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	pr_debug("Transfer status before %x\n", status);
	atomic_set(&dm_pending, 1);
	/* The DM and BR tables must be programmed before triggering the
	 * Data Mover, else the coherent transfer would be corrupted
	 */
	mb();
	/* Trigger DM */
	ocmem_write(dm_ctrl, dm_base + DM_CTRL);
	pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);

	wait_event_interruptible(dm_wq,
		atomic_read(&dm_pending) == 0);

	ocmem_disable_core_clock();
	return 0;
}

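/*
 * ocmem_rdm_init() - pick up the BR/DM register bases from platform
 * data, initialize the completion wait queue, install the data-mover
 * interrupt handler and reset the DM interrupt mask and pending state.
 * The core clock only needs to be on while registers are touched, so
 * it is enabled around the mask setup and released before returning.
 */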
int ocmem_rdm_init(struct platform_device *pdev)
{
	struct ocmem_plat_data *pdata = NULL;
	int rc = 0;

	pdata = platform_get_drvdata(pdev);

	br_base = pdata->br_base;
	dm_base = pdata->dm_base;

	/* The wait queue must be ready before the IRQ can fire */
	init_waitqueue_head(&dm_wq);

	rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
				IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);

	if (rc) {
		dev_err(&pdev->dev, "Failed to request dm irq\n");
		return -EINVAL;
	}

	rc = ocmem_enable_core_clock();

	if (rc < 0) {
		pr_err("RDM initialization failed\n");
		return rc;
	}

	/* Clear DM mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear any pending DM interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
	ocmem_disable_core_clock();
	return 0;
}
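
/*
 * Usage sketch (hypothetical caller, not part of this file): once
 * ocmem_rdm_init() has run from the platform driver's probe, a
 * scheduler path would move a client's chunk list with something like
 *
 *	struct ocmem_map_list list = { ... };
 *
 *	rc = ocmem_rdm_transfer(OCMEM_SENSORS, &list, offset, direction);
 *	if (rc < 0)
 *		pr_err("ocmem: rdm transfer failed\n");
 *
 * where `offset` is the OCMEM offset of the allocation and `direction`
 * is one of the transfer-direction constants from ocmem_priv.h.
 */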