/*
 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
 * accessing in atomic context.
 *
 * This is used by the NMI handler to access IO memory areas, because
 * ioremap/iounmap cannot be used in NMI context. The IO memory area
 * is pre-mapped in process context and then accessed in the NMI handler.
 *
 * Copyright (C) 2009-2010, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
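
/*
 * A minimal usage sketch (illustrative only; example_*, handle_status
 * and status_gar are hypothetical names, not part of this file):
 *
 *	static struct acpi_generic_address *status_gar;
 *
 *	static int __init example_init(void)
 *	{
 *		// process context: pre-map the area described by the GAR
 *		return acpi_pre_map_gar(status_gar);
 *	}
 *
 *	static void example_nmi_handler(void)
 *	{
 *		u64 status;
 *
 *		// NMI context: uses the pre-mapped area, never sleeps
 *		if (acpi_atomic_read(&status, status_gar) == 0)
 *			handle_status(status);	// hypothetical helper
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		// process context: drop the pre-mapping again
 *		acpi_post_unmap_gar(status_gar);
 *	}
 */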

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <acpi/atomicio.h>

#define ACPI_PFX "ACPI: "

static LIST_HEAD(acpi_iomaps);
/*
 * Used for mutual exclusion between writers of the acpi_iomaps list;
 * RCU is used for synchronization between readers and writers.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);

struct acpi_iomap {
	struct list_head list;
	void __iomem *vaddr;
	unsigned long size;
	phys_addr_t paddr;
	struct kref ref;
};

/* acpi_iomaps_lock or RCU read lock must be held before calling */
static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
					    unsigned long size)
{
	struct acpi_iomap *map;

	list_for_each_entry_rcu(map, &acpi_iomaps, list) {
		if (map->paddr + map->size >= paddr + size &&
		    map->paddr <= paddr)
			return map;
	}
	return NULL;
}

/*
 * Atomic "ioremap" used by the NMI handler.  If the specified IO
 * memory area is not pre-mapped, NULL is returned.
 *
 * acpi_iomaps_lock or the RCU read lock must be held before calling.
 */
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
					 unsigned long size)
{
	struct acpi_iomap *map;

	/* 'size' is passed in bits (GAR bit_width); convert to bytes */
	map = __acpi_find_iomap(paddr, size/8);
	if (map)
		return map->vaddr + (paddr - map->paddr);
	else
		return NULL;
}

/* acpi_iomaps_lock must be held before calling */
static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
					unsigned long size)
{
	struct acpi_iomap *map;

	map = __acpi_find_iomap(paddr, size);
	if (map) {
		kref_get(&map->ref);
		return map->vaddr + (paddr - map->paddr);
	} else
		return NULL;
}

/*
 * Used to pre-map the specified IO memory area.  First check whether
 * the area is already pre-mapped; if it is, increase its reference
 * count (in __acpi_try_ioremap) and return the existing mapping.
 * Otherwise, do the real ioremap and add the new mapping to the
 * acpi_iomaps list.
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

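	/*
	 * Re-check under the lock: another thread may have mapped the
	 * same area while the lock was dropped.  If so, reuse that
	 * mapping (its reference count was already incremented by
	 * __acpi_try_ioremap) and discard the one just created.
	 */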
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return map->vaddr + (paddr - map->paddr);
err_unmap:
	iounmap(vaddr);
	return NULL;
}

/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
	struct acpi_iomap *map;

	map = container_of(ref, struct acpi_iomap, ref);
	/*
	 * Only unlink here; the caller does iounmap()/kfree() after
	 * synchronize_rcu(), so readers may still use the mapping.
	 */
	list_del_rcu(&map->list);
}

/*
 * Used to post-unmap the specified IO memory area.  The iounmap is
 * done only when the reference count drops to zero.
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	BUG_ON(!map);
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	if (!del)
		return;

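	/*
	 * The last reference is gone; wait for all RCU readers
	 * (including NMI handlers going through __acpi_ioremap_fast)
	 * to finish before the mapping is torn down.
	 */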
	synchronize_rcu();
	iounmap(map->vaddr);
	kfree(map);
}

/* Callers in NMI context should pass silent = 1 */
static int acpi_check_gar(struct acpi_generic_address *reg,
			  u64 *paddr, int silent)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid physical address in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid address space type in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}

/* Pre-map, working on GAR */
int acpi_pre_map_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	void __iomem *vaddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
	if (!vaddr)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_pre_map_gar);

/* Post-unmap, working on GAR */
int acpi_post_unmap_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	acpi_post_unmap(paddr, reg->bit_width / 8);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);

#ifdef readq
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
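/*
 * Fallback for architectures without readq(): emulate a 64-bit MMIO
 * read with two 32-bit reads (low half at addr, high half at addr + 4).
 * Note that this is not a single atomic bus access.
 */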
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;

	l = readl(addr);
	h = readl(addr + 4);
	return l | (h << 32);
}
#endif

/*
 * Can be used in atomic (including NMI) or process context.  The RCU
 * read lock may only be released after the IO memory access has
 * completed.
 */
static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	addr = __acpi_ioremap_fast(paddr, width);
	switch (width) {
	case 8:
		*val = readb(addr);
		break;
	case 16:
		*val = readw(addr);
		break;
	case 32:
		*val = readl(addr);
		break;
	case 64:
		*val = read64(addr);
		break;
	default:
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

#ifdef writeq
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
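/*
 * Fallback for architectures without writeq(): emulate a 64-bit MMIO
 * write with two 32-bit writes (low half to addr, high half to
 * addr + 4).  As with read64(), this is not a single atomic access.
 */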
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

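/*
 * Counterpart of acpi_atomic_read_mem(); can likewise be used in
 * atomic (including NMI) or process context.
 */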
static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	addr = __acpi_ioremap_fast(paddr, width);
	switch (width) {
	case 8:
		writeb(val, addr);
		break;
	case 16:
		writew(val, addr);
		break;
	case 32:
		writel(val, addr);
		break;
	case 64:
		write64(val, addr);
		break;
	default:
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

/* GAR access in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_read_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);

int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_write_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_write_port(paddr, val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);