/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Code for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>

#include <asm/proc-fns.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/reset.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/regs-serial.h>

#include "common.h"

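/*
 * Byte stride between the per-CPU banked GIC register aliases; set by
 * exynos4_init_irq() from the SoC type and consumed by
 * exynos4_gic_irq_fix_base() below.
 */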
unsigned int gic_bank_offset __read_mostly;

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";

static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	},
};

/* Initial IO mappings */

static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO2,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO3,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
		.length		= SZ_256,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

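/*
 * Default idle routine, installed as pm_idle from exynos_init(): enter a
 * low-power wait unless a reschedule is already pending.
 */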
static void exynos_idle(void)
{
	if (!need_resched())
		cpu_do_idle();

	local_irq_enable();
}

static void exynos4_sw_reset(void)
{
	__raw_writel(0x1, S5P_SWRESET);
}

/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}

void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}

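/*
 * Interrupt combiner support.  Each combiner group funnels up to
 * MAX_IRQ_IN_COMBINER sources into a single GIC SPI; groups of four share
 * one set of enable/clear/status registers, with an 8-bit field per group
 * (see the irq_mask computation in combiner_init() below).
 */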
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
				 unsigned int irq_start)
{
	unsigned int i;

	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();

	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_start;
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);

	/* Setup the Linux IRQ subsystem */
	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
				+ MAX_IRQ_IN_COMBINER; i++) {
		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
		irq_set_chip_data(i, &combiner_data[combiner_nr]);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}
}

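/*
 * The EXYNOS4 GIC registers are banked per CPU at gic_bank_offset-sized
 * intervals, so the generic GIC base addresses are re-pointed at the
 * current CPU's alias before every mask/unmask/EOI via gic_arch_extn.
 */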
static void exynos4_gic_irq_fix_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	gic_data->cpu_base = S5P_VA_GIC_CPU +
			     (gic_bank_offset * smp_processor_id());

	gic_data->dist_base = S5P_VA_GIC_DIST +
			      (gic_bank_offset * smp_processor_id());
}

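/*
 * SoC interrupt setup: initialise the GIC, hook the per-CPU base fixup
 * above, and cascade each interrupt combiner group off its GIC SPI.
 */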
void __init exynos4_init_irq(void)
{
	int irq;

	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
	gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;

	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

struct sysdev_class exynos4_sysclass = {
	.name	= "exynos4-core",
};

static struct sys_device exynos4_sysdev = {
	.cls	= &exynos4_sysclass,
};

static int __init exynos4_core_init(void)
{
	return sysdev_class_register(&exynos4_sysclass);
}
core_initcall(exynos4_core_init);

#ifdef CONFIG_CACHE_L2X0
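/*
 * Configure and enable the L2X0 outer cache controller: tag/data RAM
 * latencies, prefetch and power controls, then l2x0_init() with the
 * auxiliary control value and mask used below.
 */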
static int __init exynos4_l2x0_cache_init(void)
{
	/* TAG, Data Latency Control: 2cycle */
	__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);

	if (soc_is_exynos4210())
		__raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		__raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

	/* L2X0 Prefetch Control */
	__raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

	/* L2X0 Power Control */
	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
		     S5P_VA_L2CC + L2X0_POWER_CTRL);

	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);

	return 0;
}

early_initcall(exynos4_l2x0_cache_init);
#endif

int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	/* set idle function */
	pm_idle = exynos_idle;

	/* set sw_reset function */
	if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412())
		s5p_reset_hook = exynos4_sw_reset;

	return sysdev_register(&exynos4_sysdev);
}

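/*
 * Default UART clock source list: "uclk1", used for any port whose board
 * config does not supply its own clocks in exynos4_init_uarts() below
 * (a min/max baud of 0 is treated as unrestricted by the Samsung serial
 * code).
 */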
static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
	[0] = {
		.name		= "uclk1",
		.divisor	= 1,
		.min_baud	= 0,
		.max_baud	= 0,
	},
};

/* uart registration process */

void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
		if (!tcfg->clocks) {
			tcfg->has_fracval = 1;
			tcfg->clocks = exynos4_serial_clocks;
			tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
		}
		tcfg->flags |= NO_NEED_CHECK_CLKSRC;
	}

	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
}

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

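/*
 * EINT0-15 each have a dedicated (non-contiguous) interrupt line; map an
 * external interrupt number 0..15 to the corresponding IRQ_EINTx number.
 */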
static unsigned int exynos4_get_irq_nr(unsigned int number)
{
	u32 ret = 0;

	switch (number) {
	case 0 ... 3:
		ret = (number + IRQ_EINT0);
		break;
	case 4 ... 7:
		ret = (number + (IRQ_EINT4 - 4));
		break;
	case 8 ... 15:
		ret = (number + (IRQ_EINT8 - 8));
		break;
	default:
		printk(KERN_ERR "invalid EINT number: %d\n", number);
	}

	return ret;
}

static inline void exynos4_irq_eint_mask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask |= eint_irq_to_bit(data->irq);
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static void exynos4_irq_eint_unmask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static inline void exynos4_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}

static void exynos4_irq_eint_maskack(struct irq_data *data)
{
	exynos4_irq_eint_mask(data);
	exynos4_irq_eint_ack(data);
}

static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);

	switch (offs) {
	case 0 ... 7:
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
		break;
	case 8 ... 15:
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
		break;
	case 16 ... 23:
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
		break;
	case 24 ... 31:
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
		break;
	default:
		printk(KERN_ERR "No such irq number %d\n", offs);
	}

	return 0;
}

static struct irq_chip exynos4_irq_eint = {
	.name		= "exynos4-eint",
	.irq_mask	= exynos4_irq_eint_mask,
	.irq_unmask	= exynos4_irq_eint_unmask,
	.irq_mask_ack	= exynos4_irq_eint_maskack,
	.irq_ack	= exynos4_irq_eint_ack,
	.irq_set_type	= exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};

/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQs from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos4_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));

	status &= ~mask;
	status &= 0xff;

	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}

static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos4_irq_demux_eint(IRQ_EINT(16));
	exynos4_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}

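/*
 * Chained handler for the dedicated EINT0-15 lines: mask and ack the
 * parent interrupt, forward to the IRQ_EINTx number stashed in the
 * handler data by exynos4_init_irq_eint(), then unmask the parent.
 */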
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}

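/*
 * Register the EINT irq_chip for all 32 external interrupts and wire up
 * the chained demux handlers: one shared parent for EINT16-31 and one
 * dedicated parent per EINT0-15.
 */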
int __init exynos4_init_irq_eint(void)
{
	int irq;

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		irq_set_handler_data(exynos4_get_irq_nr(irq),
				     &eint0_15_data[irq]);
		irq_set_chained_handler(exynos4_get_irq_nr(irq),
					exynos4_irq_eint0_15);
	}

	return 0;
}
arch_initcall(exynos4_init_irq_eint);