Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index acca35a..263eaaf 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -52,10 +52,6 @@
EXPORT_SYMBOL(__backtrace);
- /* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
-
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2cd0076..af0a86c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -734,8 +734,15 @@
ldr r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ stmdb r13!, {r0-r3, lr}
+ mov r0, r6
+ bl emulate_domain_manager_set
+ ldmia r13!, {r0-r3, lr}
+#else
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
+#endif
mov r5, r0
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0..f379b04 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -362,10 +362,17 @@
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ mov r5, #(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_TABLE, DOMAIN_CLIENT) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+#else
+ mov r5, #(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_TABLE, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+#endif
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
b __turn_mmu_on
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25..951eb8d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -833,6 +833,18 @@
return ret;
}
+static void reset_brps_reserved_reg(int n)
+{
+ int i;
+
+ /* Reset the breakpoint control and value registers. */
+ for (i = 0; i < n; ++i) {
+ write_wb_reg(ARM_BASE_BCR + i, 0UL);
+ write_wb_reg(ARM_BASE_BVR + i, 0UL);
+ }
+}
+
/*
* One-time initialisation.
*/
@@ -880,11 +892,11 @@
if (enable_monitor_mode())
return;
- /* We must also reset any reserved registers. */
- for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
- write_wb_reg(ARM_BASE_BCR + i, 0UL);
- write_wb_reg(ARM_BASE_BVR + i, 0UL);
- }
+#ifdef CONFIG_HAVE_HW_BRKPT_RESERVED_RW_ACCESS
+ reset_brps_reserved_reg(core_num_brps);
+#else
+ reset_brps_reserved_reg(core_num_brps + core_num_reserved_brps);
+#endif
for (i = 0; i < core_num_wrps; ++i) {
write_wb_reg(ARM_BASE_WCR + i, 0UL);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad0..4060386 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -42,6 +42,8 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+#include <asm/perftypes.h>
+
/*
* No architecture-specific irq_finish function defined in arm/arch/irqs.h.
*/
@@ -76,6 +78,7 @@
{
struct pt_regs *old_regs = set_irq_regs(regs);
+ perf_mon_interrupt_in();
irq_enter();
/*
@@ -95,6 +98,7 @@
irq_exit();
set_irq_regs(old_regs);
+ perf_mon_interrupt_out();
}
void set_irq_flags(unsigned int irq, unsigned int iflags)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd4..601ef74 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -86,6 +86,7 @@
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
+ arch_kexec();
page_list = image->head & PAGE_MASK;
@@ -120,5 +121,5 @@
cpu_proc_fin();
outer_inv_all();
flush_cache_all();
- cpu_reset(reboot_code_buffer_phys);
+ __virt_to_phys(cpu_reset)(reboot_code_buffer_phys);
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 2b5b142..c998282 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <linux/irq.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -71,6 +72,10 @@
enum arm_perf_pmu_ids id;
const char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
+#ifdef CONFIG_SMP
+ void (*secondary_enable)(unsigned int irq);
+ void (*secondary_disable)(unsigned int irq);
+#endif
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
int (*get_event_idx)(struct cpu_hw_events *cpuc,
@@ -426,6 +431,10 @@
pr_warning("unable to request IRQ%d for ARM perf "
"counters\n", irq);
break;
+#ifdef CONFIG_SMP
+ } else if (armpmu->secondary_enable) {
+ armpmu->secondary_enable(irq);
+#endif
}
}
@@ -449,8 +458,13 @@
for (i = pmu_device->num_resources - 1; i >= 0; --i) {
irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
+ if (irq >= 0) {
free_irq(irq, NULL);
+#ifdef CONFIG_SMP
+ if (armpmu->secondary_disable)
+ armpmu->secondary_disable(irq);
+#endif
+ }
}
armpmu->stop();
@@ -624,6 +638,10 @@
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
+#include "perf_event_msm.c"
+#include "perf_event_msm_l2.c"
+#include "perf_event_msm_krait.c"
+#include "perf_event_msm_krait_l2.c"
/*
* Ensure the PMU has sane values out of reset.
@@ -674,6 +692,22 @@
armpmu = xscale2pmu_init();
break;
}
+ /* Qualcomm CPUs */
+ } else if (0x51 == implementor) {
+ switch (part_number) {
+ case 0x00F0: /* 8x50 & 7x30 */
+ armpmu = armv7_scorpion_pmu_init();
+ break;
+ case 0x02D0: /* 8x60 */
+ armpmu = armv7_scorpionmp_pmu_init();
+ scorpionmp_l2_pmu_init();
+ break;
+ case 0x0490: /* 8960 sim */
+ case 0x04D0: /* 8960 */
+ armpmu = armv7_krait_pmu_init();
+ krait_l2_pmu_init();
+ break;
+ }
}
if (armpmu) {
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
new file mode 100644
index 0000000..ae8bfcb
--- /dev/null
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/vfp.h>
+#include <asm/system.h>
+#include "../vfp/vfpinstr.h"
+
+#ifdef CONFIG_CPU_V7
+enum scorpion_perf_common {
+ SCORPION_EVT_START_IDX = 0x4c,
+ SCORPION_ICACHE_EXPL_INV = 0x4c,
+ SCORPION_ICACHE_MISS = 0x4d,
+ SCORPION_ICACHE_ACCESS = 0x4e,
+ SCORPION_ICACHE_CACHEREQ_L2 = 0x4f,
+ SCORPION_ICACHE_NOCACHE_L2 = 0x50,
+ SCORPION_HIQUP_NOPED = 0x51,
+ SCORPION_DATA_ABORT = 0x52,
+ SCORPION_IRQ = 0x53,
+ SCORPION_FIQ = 0x54,
+ SCORPION_ALL_EXCPT = 0x55,
+ SCORPION_UNDEF = 0x56,
+ SCORPION_SVC = 0x57,
+ SCORPION_SMC = 0x58,
+ SCORPION_PREFETCH_ABORT = 0x59,
+ SCORPION_INDEX_CHECK = 0x5a,
+ SCORPION_NULL_CHECK = 0x5b,
+ SCORPION_ICIMVAU_IMPL_ICIALLU = 0x5c,
+ SCORPION_NONICIALLU_BTAC_INV = 0x5d,
+ SCORPION_IMPL_ICIALLU = 0x5e,
+ SCORPION_EXPL_ICIALLU = 0x5f,
+ SCORPION_SPIPE_ONLY_CYCLES = 0x60,
+ SCORPION_XPIPE_ONLY_CYCLES = 0x61,
+ SCORPION_DUAL_CYCLES = 0x62,
+ SCORPION_DISPATCH_ANY_CYCLES = 0x63,
+ SCORPION_FIFO_FULLBLK_CMT = 0x64,
+ SCORPION_FAIL_COND_INST = 0x65,
+ SCORPION_PASS_COND_INST = 0x66,
+ SCORPION_ALLOW_VU_CLK = 0x67,
+ SCORPION_VU_IDLE = 0x68,
+ SCORPION_ALLOW_L2_CLK = 0x69,
+ SCORPION_L2_IDLE = 0x6a,
+ SCORPION_DTLB_IMPL_INV_SCTLR_DACR = 0x6b,
+ SCORPION_DTLB_EXPL_INV = 0x6c,
+ SCORPION_DTLB_MISS = 0x6d,
+ SCORPION_DTLB_ACCESS = 0x6e,
+ SCORPION_ITLB_MISS = 0x6f,
+ SCORPION_ITLB_IMPL_INV = 0x70,
+ SCORPION_ITLB_EXPL_INV = 0x71,
+ SCORPION_UTLB_D_MISS = 0x72,
+ SCORPION_UTLB_D_ACCESS = 0x73,
+ SCORPION_UTLB_I_MISS = 0x74,
+ SCORPION_UTLB_I_ACCESS = 0x75,
+ SCORPION_UTLB_INV_ASID = 0x76,
+ SCORPION_UTLB_INV_MVA = 0x77,
+ SCORPION_UTLB_INV_ALL = 0x78,
+ SCORPION_S2_HOLD_RDQ_UNAVAIL = 0x79,
+ SCORPION_S2_HOLD = 0x7a,
+ SCORPION_S2_HOLD_DEV_OP = 0x7b,
+ SCORPION_S2_HOLD_ORDER = 0x7c,
+ SCORPION_S2_HOLD_BARRIER = 0x7d,
+ SCORPION_VIU_DUAL_CYCLE = 0x7e,
+ SCORPION_VIU_SINGLE_CYCLE = 0x7f,
+ SCORPION_VX_PIPE_WAR_STALL_CYCLES = 0x80,
+ SCORPION_VX_PIPE_WAW_STALL_CYCLES = 0x81,
+ SCORPION_VX_PIPE_RAW_STALL_CYCLES = 0x82,
+ SCORPION_VX_PIPE_LOAD_USE_STALL = 0x83,
+ SCORPION_VS_PIPE_WAR_STALL_CYCLES = 0x84,
+ SCORPION_VS_PIPE_WAW_STALL_CYCLES = 0x85,
+ SCORPION_VS_PIPE_RAW_STALL_CYCLES = 0x86,
+ SCORPION_EXCEPTIONS_INV_OPERATION = 0x87,
+ SCORPION_EXCEPTIONS_DIV_BY_ZERO = 0x88,
+ SCORPION_COND_INST_FAIL_VX_PIPE = 0x89,
+ SCORPION_COND_INST_FAIL_VS_PIPE = 0x8a,
+ SCORPION_EXCEPTIONS_OVERFLOW = 0x8b,
+ SCORPION_EXCEPTIONS_UNDERFLOW = 0x8c,
+ SCORPION_EXCEPTIONS_DENORM = 0x8d,
+};
+
+enum scorpion_perf_smp {
+ SCORPIONMP_NUM_BARRIERS = 0x8e,
+ SCORPIONMP_BARRIER_CYCLES = 0x8f,
+};
+
+enum scorpion_perf_up {
+ SCORPION_BANK_AB_HIT = 0x8e,
+ SCORPION_BANK_AB_ACCESS = 0x8f,
+ SCORPION_BANK_CD_HIT = 0x90,
+ SCORPION_BANK_CD_ACCESS = 0x91,
+ SCORPION_BANK_AB_DSIDE_HIT = 0x92,
+ SCORPION_BANK_AB_DSIDE_ACCESS = 0x93,
+ SCORPION_BANK_CD_DSIDE_HIT = 0x94,
+ SCORPION_BANK_CD_DSIDE_ACCESS = 0x95,
+ SCORPION_BANK_AB_ISIDE_HIT = 0x96,
+ SCORPION_BANK_AB_ISIDE_ACCESS = 0x97,
+ SCORPION_BANK_CD_ISIDE_HIT = 0x98,
+ SCORPION_BANK_CD_ISIDE_ACCESS = 0x99,
+ SCORPION_ISIDE_RD_WAIT = 0x9a,
+ SCORPION_DSIDE_RD_WAIT = 0x9b,
+ SCORPION_BANK_BYPASS_WRITE = 0x9c,
+ SCORPION_BANK_AB_NON_CASTOUT = 0x9d,
+ SCORPION_BANK_AB_L2_CASTOUT = 0x9e,
+ SCORPION_BANK_CD_NON_CASTOUT = 0x9f,
+ SCORPION_BANK_CD_L2_CASTOUT = 0xa0,
+};
+
+static const unsigned armv7_scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ /*
+ * Only ITLB misses and DTLB refills are supported.
+ * If users want the DTLB refill misses, a raw counter
+ * must be used.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+struct scorpion_evt {
+ /*
+ * The scorpion_evt_type field corresponds to the actual Scorpion
+ * event codes. These map many-to-one to the armv7 defined codes
+ */
+ u32 scorpion_evt_type;
+
+ /*
+ * The group_setval field corresponds to the value that the group
+ * register needs to be set to. This value is deduced from the row
+ * and column that the event belongs to in the event table
+ */
+ u32 group_setval;
+
+ /*
+ * The groupcode corresponds to the group that the event belongs to.
+ * Scorpion has 5 groups of events: LPM0, LPM1, LPM2, L2LPM and VLPM,
+ * with group codes 0 to 4 respectively.
+ */
+ u8 groupcode;
+
+ /*
+ * The armv7_evt_type field corresponds to the armv7 defined event
+ * code that the Scorpion events map to
+ */
+ u32 armv7_evt_type;
+};
+
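+/*
+ * Illustration from the table below: the entry
+ * {SCORPION_ICACHE_MISS, 0x80050000, 0, 0x4e} means that group LPM0
+ * (groupcode 0) is programmed with group_setval 0x80050000 and the PMNx
+ * counter's event select is set to the armv7 code 0x4e
+ * (see scorpion_evt_setup() and scorpion_pmu_enable_event()).
+ */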
+static const struct scorpion_evt scorpion_event[] = {
+ {SCORPION_ICACHE_EXPL_INV, 0x80000500, 0, 0x4d},
+ {SCORPION_ICACHE_MISS, 0x80050000, 0, 0x4e},
+ {SCORPION_ICACHE_ACCESS, 0x85000000, 0, 0x4f},
+ {SCORPION_ICACHE_CACHEREQ_L2, 0x86000000, 0, 0x4f},
+ {SCORPION_ICACHE_NOCACHE_L2, 0x87000000, 0, 0x4f},
+ {SCORPION_HIQUP_NOPED, 0x80080000, 0, 0x4e},
+ {SCORPION_DATA_ABORT, 0x8000000a, 0, 0x4c},
+ {SCORPION_IRQ, 0x80000a00, 0, 0x4d},
+ {SCORPION_FIQ, 0x800a0000, 0, 0x4e},
+ {SCORPION_ALL_EXCPT, 0x8a000000, 0, 0x4f},
+ {SCORPION_UNDEF, 0x8000000b, 0, 0x4c},
+ {SCORPION_SVC, 0x80000b00, 0, 0x4d},
+ {SCORPION_SMC, 0x800b0000, 0, 0x4e},
+ {SCORPION_PREFETCH_ABORT, 0x8b000000, 0, 0x4f},
+ {SCORPION_INDEX_CHECK, 0x8000000c, 0, 0x4c},
+ {SCORPION_NULL_CHECK, 0x80000c00, 0, 0x4d},
+ {SCORPION_ICIMVAU_IMPL_ICIALLU, 0x8000000d, 0, 0x4c},
+ {SCORPION_NONICIALLU_BTAC_INV, 0x80000d00, 0, 0x4d},
+ {SCORPION_IMPL_ICIALLU, 0x800d0000, 0, 0x4e},
+ {SCORPION_EXPL_ICIALLU, 0x8d000000, 0, 0x4f},
+
+ {SCORPION_SPIPE_ONLY_CYCLES, 0x80000600, 1, 0x51},
+ {SCORPION_XPIPE_ONLY_CYCLES, 0x80060000, 1, 0x52},
+ {SCORPION_DUAL_CYCLES, 0x86000000, 1, 0x53},
+ {SCORPION_DISPATCH_ANY_CYCLES, 0x89000000, 1, 0x53},
+ {SCORPION_FIFO_FULLBLK_CMT, 0x8000000d, 1, 0x50},
+ {SCORPION_FAIL_COND_INST, 0x800d0000, 1, 0x52},
+ {SCORPION_PASS_COND_INST, 0x8d000000, 1, 0x53},
+ {SCORPION_ALLOW_VU_CLK, 0x8000000e, 1, 0x50},
+ {SCORPION_VU_IDLE, 0x80000e00, 1, 0x51},
+ {SCORPION_ALLOW_L2_CLK, 0x800e0000, 1, 0x52},
+ {SCORPION_L2_IDLE, 0x8e000000, 1, 0x53},
+
+ {SCORPION_DTLB_IMPL_INV_SCTLR_DACR, 0x80000001, 2, 0x54},
+ {SCORPION_DTLB_EXPL_INV, 0x80000100, 2, 0x55},
+ {SCORPION_DTLB_MISS, 0x80010000, 2, 0x56},
+ {SCORPION_DTLB_ACCESS, 0x81000000, 2, 0x57},
+ {SCORPION_ITLB_MISS, 0x80000200, 2, 0x55},
+ {SCORPION_ITLB_IMPL_INV, 0x80020000, 2, 0x56},
+ {SCORPION_ITLB_EXPL_INV, 0x82000000, 2, 0x57},
+ {SCORPION_UTLB_D_MISS, 0x80000003, 2, 0x54},
+ {SCORPION_UTLB_D_ACCESS, 0x80000300, 2, 0x55},
+ {SCORPION_UTLB_I_MISS, 0x80030000, 2, 0x56},
+ {SCORPION_UTLB_I_ACCESS, 0x83000000, 2, 0x57},
+ {SCORPION_UTLB_INV_ASID, 0x80000400, 2, 0x55},
+ {SCORPION_UTLB_INV_MVA, 0x80040000, 2, 0x56},
+ {SCORPION_UTLB_INV_ALL, 0x84000000, 2, 0x57},
+ {SCORPION_S2_HOLD_RDQ_UNAVAIL, 0x80000800, 2, 0x55},
+ {SCORPION_S2_HOLD, 0x88000000, 2, 0x57},
+ {SCORPION_S2_HOLD_DEV_OP, 0x80000900, 2, 0x55},
+ {SCORPION_S2_HOLD_ORDER, 0x80090000, 2, 0x56},
+ {SCORPION_S2_HOLD_BARRIER, 0x89000000, 2, 0x57},
+
+ {SCORPION_VIU_DUAL_CYCLE, 0x80000001, 4, 0x5c},
+ {SCORPION_VIU_SINGLE_CYCLE, 0x80000100, 4, 0x5d},
+ {SCORPION_VX_PIPE_WAR_STALL_CYCLES, 0x80000005, 4, 0x5c},
+ {SCORPION_VX_PIPE_WAW_STALL_CYCLES, 0x80000500, 4, 0x5d},
+ {SCORPION_VX_PIPE_RAW_STALL_CYCLES, 0x80050000, 4, 0x5e},
+ {SCORPION_VX_PIPE_LOAD_USE_STALL, 0x80000007, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAR_STALL_CYCLES, 0x80000008, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAW_STALL_CYCLES, 0x80000800, 4, 0x5d},
+ {SCORPION_VS_PIPE_RAW_STALL_CYCLES, 0x80080000, 4, 0x5e},
+ {SCORPION_EXCEPTIONS_INV_OPERATION, 0x8000000b, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_DIV_BY_ZERO, 0x80000b00, 4, 0x5d},
+ {SCORPION_COND_INST_FAIL_VX_PIPE, 0x800b0000, 4, 0x5e},
+ {SCORPION_COND_INST_FAIL_VS_PIPE, 0x8b000000, 4, 0x5f},
+ {SCORPION_EXCEPTIONS_OVERFLOW, 0x8000000c, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_UNDERFLOW, 0x80000c00, 4, 0x5d},
+ {SCORPION_EXCEPTIONS_DENORM, 0x8c000000, 4, 0x5f},
+
+#ifdef CONFIG_MSM_SMP
+ {SCORPIONMP_NUM_BARRIERS, 0x80000e00, 3, 0x59},
+ {SCORPIONMP_BARRIER_CYCLES, 0x800e0000, 3, 0x5a},
+#else
+ {SCORPION_BANK_AB_HIT, 0x80000001, 3, 0x58},
+ {SCORPION_BANK_AB_ACCESS, 0x80000100, 3, 0x59},
+ {SCORPION_BANK_CD_HIT, 0x80010000, 3, 0x5a},
+ {SCORPION_BANK_CD_ACCESS, 0x81000000, 3, 0x5b},
+ {SCORPION_BANK_AB_DSIDE_HIT, 0x80000002, 3, 0x58},
+ {SCORPION_BANK_AB_DSIDE_ACCESS, 0x80000200, 3, 0x59},
+ {SCORPION_BANK_CD_DSIDE_HIT, 0x80020000, 3, 0x5a},
+ {SCORPION_BANK_CD_DSIDE_ACCESS, 0x82000000, 3, 0x5b},
+ {SCORPION_BANK_AB_ISIDE_HIT, 0x80000003, 3, 0x58},
+ {SCORPION_BANK_AB_ISIDE_ACCESS, 0x80000300, 3, 0x59},
+ {SCORPION_BANK_CD_ISIDE_HIT, 0x80030000, 3, 0x5a},
+ {SCORPION_BANK_CD_ISIDE_ACCESS, 0x83000000, 3, 0x5b},
+ {SCORPION_ISIDE_RD_WAIT, 0x80000009, 3, 0x58},
+ {SCORPION_DSIDE_RD_WAIT, 0x80090000, 3, 0x5a},
+ {SCORPION_BANK_BYPASS_WRITE, 0x8000000a, 3, 0x58},
+ {SCORPION_BANK_AB_NON_CASTOUT, 0x8000000c, 3, 0x58},
+ {SCORPION_BANK_AB_L2_CASTOUT, 0x80000c00, 3, 0x59},
+ {SCORPION_BANK_CD_NON_CASTOUT, 0x800c0000, 3, 0x5a},
+ {SCORPION_BANK_CD_L2_CASTOUT, 0x8c000000, 3, 0x5b},
+#endif
+};
+
+static unsigned int get_scorpion_evtinfo(unsigned int scorpion_evt_type,
+ struct scorpion_evt *evtinfo)
+{
+ u32 idx;
+
+ if (scorpion_evt_type < SCORPION_EVT_START_IDX || scorpion_evt_type >=
+ (ARRAY_SIZE(scorpion_event) + SCORPION_EVT_START_IDX))
+ return -EINVAL;
+ idx = scorpion_evt_type - SCORPION_EVT_START_IDX;
+ if (scorpion_event[idx].scorpion_evt_type == scorpion_evt_type) {
+ evtinfo->group_setval = scorpion_event[idx].group_setval;
+ evtinfo->groupcode = scorpion_event[idx].groupcode;
+ evtinfo->armv7_evt_type = scorpion_event[idx].armv7_evt_type;
+ return scorpion_event[idx].armv7_evt_type;
+ }
+ return -EINVAL;
+}
+
+static u32 scorpion_read_lpm0(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm0(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm1(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm1(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm2(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm2(u32 val)
+{
+ asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_l2lpm(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_l2lpm(u32 val)
+{
+ asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_vlpm(void)
+{
+ u32 val;
+
+ asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_vlpm(u32 val)
+{
+ asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
+}
+
+/*
+ * The Scorpion processor supports performance monitoring for the Venum unit.
+ * In order to access the performance monitor registers corresponding to the
+ * VFP, the CPACR and FPEXC registers need to be set up beforehand.
+ * They also need to be restored once the access is done.
+ * This is the reason for having the pre and post functions below.
+ */
+
+static DEFINE_PER_CPU(u32, venum_orig_val);
+static DEFINE_PER_CPU(u32, fp_orig_val);
+
+static void scorpion_pre_vlpm(void)
+{
+ u32 venum_new_val;
+ u32 fp_new_val;
+
+ /* Enable CP10 access via CPACR */
+ venum_orig_val = get_copro_access();
+ venum_new_val = venum_orig_val | CPACC_SVC(10);
+ set_copro_access(venum_new_val);
+ /* Enable FPEXC */
+ fp_orig_val = fmrx(FPEXC);
+ fp_new_val = fp_orig_val | FPEXC_EN;
+ fmxr(FPEXC, fp_new_val);
+}
+
+static void scorpion_post_vlpm(void)
+{
+ /* Restore FPEXC */
+ fmxr(FPEXC, fp_orig_val);
+ isb();
+ /* Restore CPACR */
+ set_copro_access(venum_orig_val);
+}
+
+struct scorpion_access_funcs {
+ u32 (*read) (void);
+ void (*write) (u32);
+ void (*pre) (void);
+ void (*post) (void);
+};
+
+/*
+ * The scorpion_functions array is used to set up the event register codes
+ * based on the group to which an event belongs.
+ * Having the following array modularizes the code for doing that.
+ */
+struct scorpion_access_funcs scorpion_functions[] = {
+ {scorpion_read_lpm0, scorpion_write_lpm0, NULL, NULL},
+ {scorpion_read_lpm1, scorpion_write_lpm1, NULL, NULL},
+ {scorpion_read_lpm2, scorpion_write_lpm2, NULL, NULL},
+ {scorpion_read_l2lpm, scorpion_write_l2lpm, NULL, NULL},
+ {scorpion_read_vlpm, scorpion_write_vlpm, scorpion_pre_vlpm,
+ scorpion_post_vlpm},
+};
+
+static inline u32 scorpion_get_columnmask(u32 evt_code)
+{
+ const u32 columnmasks[] = {0xffffff00, 0xffff00ff, 0xff00ffff,
+ 0x80ffffff};
+
+ return columnmasks[evt_code & 0x3];
+}
+
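+/*
+ * Illustration: an armv7 event code of 0x4d selects column 1 (0x4d & 0x3),
+ * so the mask 0xffff00ff clears byte 1 of the group register before the new
+ * setval is ORed in by scorpion_evt_setup() below.
+ */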
+static void scorpion_evt_setup(u32 gr, u32 setval, u32 evt_code)
+{
+ u32 val;
+
+ if (scorpion_functions[gr].pre)
+ scorpion_functions[gr].pre();
+ val = scorpion_get_columnmask(evt_code) & scorpion_functions[gr].read();
+ val = val | setval;
+ scorpion_functions[gr].write(val);
+ if (scorpion_functions[gr].post)
+ scorpion_functions[gr].post();
+}
+
+static void scorpion_clear_pmuregs(void)
+{
+ unsigned long flags;
+
+ scorpion_write_lpm0(0);
+ scorpion_write_lpm1(0);
+ scorpion_write_lpm2(0);
+ scorpion_write_l2lpm(0);
+ raw_spin_lock_irqsave(&pmu_lock, flags);
+ scorpion_pre_vlpm();
+ scorpion_write_vlpm(0);
+ scorpion_post_vlpm();
+ raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void scorpion_clearpmu(u32 grp, u32 val, u32 evt_code)
+{
+ u32 orig_pmuval, new_pmuval;
+
+ if (scorpion_functions[grp].pre)
+ scorpion_functions[grp].pre();
+ orig_pmuval = scorpion_functions[grp].read();
+ val = val & ~scorpion_get_columnmask(evt_code);
+ new_pmuval = orig_pmuval & ~val;
+ scorpion_functions[grp].write(new_pmuval);
+ if (scorpion_functions[grp].post)
+ scorpion_functions[grp].post();
+}
+
+static void scorpion_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 gr;
+ unsigned long event;
+ struct scorpion_evt evtinfo;
+
+ /* Disable counter and interrupt */
+ raw_spin_lock_irqsave(&pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Clear lpm code (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ if (idx != ARMV7_CYCLE_COUNTER) {
+ val = hwc->config_base;
+ val &= ARMV7_EVTSEL_MASK;
+ if (val > 0x40) {
+ event = get_scorpion_evtinfo(val, &evtinfo);
+ if (event == -EINVAL)
+ goto scorpion_dis_out;
+ val = evtinfo.group_setval;
+ gr = evtinfo.groupcode;
+ scorpion_clearpmu(gr, val, evtinfo.armv7_evt_type);
+ }
+ }
+ /* Disable interrupt for this counter */
+ armv7_pmnc_disable_intens(idx);
+
+scorpion_dis_out:
+ raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void scorpion_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 gr;
+ unsigned long event;
+ struct scorpion_evt evtinfo;
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ raw_spin_lock_irqsave(&pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ if (idx != ARMV7_CYCLE_COUNTER) {
+ val = hwc->config_base;
+ val &= ARMV7_EVTSEL_MASK;
+ if (val < 0x40) {
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+ } else {
+ event = get_scorpion_evtinfo(val, &evtinfo);
+
+ if (event == -EINVAL)
+ goto scorpion_out;
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ armv7_pmnc_write_evtsel(idx, event);
+ val = 0x0;
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : :
+ "r" (val));
+ val = evtinfo.group_setval;
+ gr = evtinfo.groupcode;
+ scorpion_evt_setup(gr, val, evtinfo.armv7_evt_type);
+ }
+ }
+
+ /* Enable interrupt for this counter */
+ armv7_pmnc_enable_intens(idx);
+
+ /* Enable counter */
+ armv7_pmnc_enable_counter(idx);
+
+scorpion_out:
+ raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void scorpion_secondary_enable_callback(void *info)
+{
+ int irq = *(unsigned int *)info;
+
+ if (irq_get_chip(irq)->irq_unmask)
+ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+}
+static void scorpion_secondary_disable_callback(void *info)
+{
+ int irq = *(unsigned int *)info;
+
+ if (irq_get_chip(irq)->irq_mask)
+ irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
+}
+
+static void scorpion_secondary_enable(unsigned int irq)
+{
+ smp_call_function(scorpion_secondary_enable_callback, &irq, 1);
+}
+
+static void scorpion_secondary_disable(unsigned int irq)
+{
+ smp_call_function(scorpion_secondary_disable_callback, &irq, 1);
+}
+#endif
+
+static struct arm_pmu scorpion_pmu = {
+ .handle_irq = armv7pmu_handle_irq,
+#ifdef CONFIG_SMP
+ .secondary_enable = scorpion_secondary_enable,
+ .secondary_disable = scorpion_secondary_disable,
+#endif
+ .enable = scorpion_pmu_enable_event,
+ .disable = scorpion_pmu_disable_event,
+ .read_counter = armv7pmu_read_counter,
+ .write_counter = armv7pmu_write_counter,
+ .raw_event_mask = 0xFF,
+ .get_event_idx = armv7pmu_get_event_idx,
+ .start = armv7pmu_start,
+ .stop = armv7pmu_stop,
+ .max_period = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init armv7_scorpion_pmu_init(void)
+{
+ scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPION;
+ scorpion_pmu.name = "ARMv7 Scorpion";
+ scorpion_pmu.cache_map = &armv7_scorpion_perf_cache_map;
+ scorpion_pmu.event_map = &armv7_scorpion_perf_map;
+ scorpion_pmu.num_events = armv7_read_num_pmnc_events();
+ scorpion_clear_pmuregs();
+ return &scorpion_pmu;
+}
+
+static const struct arm_pmu *__init armv7_scorpionmp_pmu_init(void)
+{
+ scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPIONMP;
+ scorpion_pmu.name = "ARMv7 Scorpion-MP";
+ scorpion_pmu.cache_map = &armv7_scorpion_perf_cache_map;
+ scorpion_pmu.event_map = &armv7_scorpion_perf_map;
+ scorpion_pmu.num_events = armv7_read_num_pmnc_events();
+ scorpion_clear_pmuregs();
+ return &scorpion_pmu;
+}
+#else
+static const struct arm_pmu *__init armv7_scorpion_pmu_init(void)
+{
+ return NULL;
+}
+static const struct arm_pmu *__init armv7_scorpionmp_pmu_init(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
new file mode 100644
index 0000000..cb94d64
--- /dev/null
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/system.h>
+
+#ifdef CONFIG_CPU_V7
+#define KRAIT_EVT_PREFIX 1
+#define KRAIT_MAX_L1_REG 2
+/*
+ * Event encoding: prccg
+ *  p  = prefix (1 for Krait L1)
+ *  r  = register
+ *  cc = code
+ *  g  = group
+ */
+#define KRAIT_L1_ICACHE_MISS 0x10010
+#define KRAIT_L1_ICACHE_ACCESS 0x10011
+#define KRAIT_DTLB_ACCESS 0x121B2
+#define KRAIT_ITLB_ACCESS 0x121C0
+
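+/*
+ * Illustration of the encoding above: KRAIT_L1_ICACHE_MISS (0x10010)
+ * decodes in get_krait_evtinfo() as prefix 1, register 0, code 0x01 and
+ * group 0, so PMRESR0 is programmed with 0x80000001 and the counter's
+ * event select is set to evt_type_base[0] | 0 = 0x4c.
+ */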
+u32 evt_type_base[] = {0x4c, 0x50, 0x54};
+
+static const unsigned armv7_krait_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = KRAIT_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = KRAIT_L1_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = KRAIT_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = KRAIT_L1_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = KRAIT_DTLB_ACCESS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = KRAIT_DTLB_ACCESS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = KRAIT_ITLB_ACCESS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = KRAIT_ITLB_ACCESS,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_USED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+struct krait_evt {
+ /*
+ * The group_setval field corresponds to the value that the group
+ * register needs to be set to. This value is calculated from the row
+ * and column that the event belongs to in the event table
+ */
+ u32 group_setval;
+
+ /*
+ * The groupcode corresponds to the group that the event belongs to.
+ * Krait has 3 groups of events: PMRESR0, 1 and 2,
+ * with group codes 0 to 2 respectively.
+ */
+ u8 groupcode;
+
+ /*
+ * The armv7_evt_type field corresponds to the armv7 defined event
+ * code that the Krait events map to
+ */
+ u32 armv7_evt_type;
+};
+
+static unsigned int get_krait_evtinfo(unsigned int krait_evt_type,
+ struct krait_evt *evtinfo)
+{
+ u8 prefix;
+ u8 reg;
+ u8 code;
+ u8 group;
+
+ prefix = (krait_evt_type & 0xF0000) >> 16;
+ reg = (krait_evt_type & 0x0F000) >> 12;
+ code = (krait_evt_type & 0x00FF0) >> 4;
+ group = krait_evt_type & 0x0000F;
+
+ if ((prefix != KRAIT_EVT_PREFIX) || (group > 3) ||
+ (reg > KRAIT_MAX_L1_REG))
+ return -EINVAL;
+
+ evtinfo->group_setval = 0x80000000 | (code << (group * 8));
+ evtinfo->groupcode = reg;
+ evtinfo->armv7_evt_type = evt_type_base[reg] | group;
+
+ return evtinfo->armv7_evt_type;
+}
+
+static u32 krait_read_pmresr0(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
+ return val;
+}
+
+static void krait_write_pmresr0(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
+}
+
+static u32 krait_read_pmresr1(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
+ return val;
+}
+
+static void krait_write_pmresr1(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
+}
+
+static u32 krait_read_pmresr2(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
+ return val;
+}
+
+static void krait_write_pmresr2(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
+}
+
+struct krait_access_funcs {
+ u32 (*read) (void);
+ void (*write) (u32);
+};
+
+/*
+ * The krait_functions array is used to set up the event register codes
+ * based on the group to which an event belongs.
+ * Having the following array modularizes the code for doing that.
+ */
+struct krait_access_funcs krait_functions[] = {
+ {krait_read_pmresr0, krait_write_pmresr0},
+ {krait_read_pmresr1, krait_write_pmresr1},
+ {krait_read_pmresr2, krait_write_pmresr2},
+};
+
+static inline u32 krait_get_columnmask(u32 evt_code)
+{
+ const u32 columnmasks[] = {0xffffff00, 0xffff00ff, 0xff00ffff,
+ 0x80ffffff};
+
+ return columnmasks[evt_code & 0x3];
+}
+
+static void krait_evt_setup(u32 gr, u32 setval, u32 evt_code)
+{
+ u32 val;
+
+ val = krait_get_columnmask(evt_code) & krait_functions[gr].read();
+ val = val | setval;
+ krait_functions[gr].write(val);
+}
+
+static void krait_clear_pmuregs(void)
+{
+ krait_write_pmresr0(0);
+ krait_write_pmresr1(0);
+ krait_write_pmresr2(0);
+}
+
+static void krait_clearpmu(u32 grp, u32 val, u32 evt_code)
+{
+ u32 new_pmuval;
+
+ new_pmuval = krait_functions[grp].read() &
+ krait_get_columnmask(evt_code);
+ krait_functions[grp].write(new_pmuval);
+}
+
+static void krait_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 gr;
+ unsigned long event;
+ struct krait_evt evtinfo;
+
+ /* Disable counter and interrupt */
+ raw_spin_lock_irqsave(&pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Clear pmresr code (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ if (idx != ARMV7_CYCLE_COUNTER) {
+ val = hwc->config_base;
+ if (val > 0x40) {
+ event = get_krait_evtinfo(val, &evtinfo);
+ if (event == -EINVAL)
+ goto krait_dis_out;
+ val = evtinfo.group_setval;
+ gr = evtinfo.groupcode;
+ krait_clearpmu(gr, val, evtinfo.armv7_evt_type);
+ }
+ }
+ /* Disable interrupt for this counter */
+ armv7_pmnc_disable_intens(idx);
+
+krait_dis_out:
+ raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void krait_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 gr;
+ unsigned long event;
+ struct krait_evt evtinfo;
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ raw_spin_lock_irqsave(&pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ if (idx != ARMV7_CYCLE_COUNTER) {
+ val = hwc->config_base;
+ if (val < 0x40) {
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+ } else {
+ event = get_krait_evtinfo(val, &evtinfo);
+
+ if (event == -EINVAL)
+ goto krait_out;
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ armv7_pmnc_write_evtsel(idx, event);
+ val = 0x0;
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : :
+ "r" (val));
+ val = evtinfo.group_setval;
+ gr = evtinfo.groupcode;
+ krait_evt_setup(gr, val, evtinfo.armv7_evt_type);
+ }
+ }
+
+ /* Enable interrupt for this counter */
+ armv7_pmnc_enable_intens(idx);
+
+ /* Enable counter */
+ armv7_pmnc_enable_counter(idx);
+
+krait_out:
+ raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static struct arm_pmu krait_pmu = {
+ .handle_irq = armv7pmu_handle_irq,
+#ifdef CONFIG_ARCH_MSM_SMP
+ .secondary_enable = scorpion_secondary_enable,
+ .secondary_disable = scorpion_secondary_disable,
+#endif
+ .enable = krait_pmu_enable_event,
+ .disable = krait_pmu_disable_event,
+ .read_counter = armv7pmu_read_counter,
+ .write_counter = armv7pmu_write_counter,
+ .raw_event_mask = 0xFFFFF,
+ .get_event_idx = armv7pmu_get_event_idx,
+ .start = armv7pmu_start,
+ .stop = armv7pmu_stop,
+ .max_period = (1LLU << 32) - 1,
+};
+
+static const struct arm_pmu *__init armv7_krait_pmu_init(void)
+{
+ krait_pmu.id = ARM_PERF_PMU_ID_KRAIT;
+ krait_pmu.name = "ARMv7 Krait";
+ krait_pmu.cache_map = &armv7_krait_perf_cache_map;
+ krait_pmu.event_map = &armv7_krait_perf_map;
+ krait_pmu.num_events = armv7_read_num_pmnc_events();
+ krait_clear_pmuregs();
+ return &krait_pmu;
+}
+
+#else
+static const struct arm_pmu *__init armv7_krait_pmu_init(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_msm_krait_l2.c b/arch/arm/kernel/perf_event_msm_krait_l2.c
new file mode 100644
index 0000000..7cb4ee7
--- /dev/null
+++ b/arch/arm/kernel/perf_event_msm_krait_l2.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef CONFIG_CPU_HAS_L2_PMU
+
+#include <linux/irq.h>
+
+#include <mach/msm-krait-l2-accessors.h>
+
+#define MAX_L2_PERIOD ((1ULL << 32) - 1)
+#define MAX_KRAIT_L2_CTRS 5
+
+#define L2PMCCNTR 0x409
+#define L2PMCCNTCR 0x408
+#define L2PMCCNTSR 0x40A
+#define L2CYCLE_CTR_BIT 31
+#define L2CYCLE_CTR_EVENT_IDX 4
+#define L2CYCLE_CTR_RAW_CODE 0xff
+
+#define L2PMOVSR 0x406
+
+#define L2PMCR 0x400
+#define L2PMCR_RESET_ALL 0x6
+#define L2PMCR_GLOBAL_ENABLE 0x1
+#define L2PMCR_GLOBAL_DISABLE 0x0
+
+#define L2PMCNTENSET 0x403
+#define L2PMCNTENCLR 0x402
+
+#define L2PMINTENSET 0x405
+#define L2PMINTENCLR 0x404
+
+#define IA_L2PMXEVCNTCR_BASE 0x420
+#define IA_L2PMXEVTYPER_BASE 0x424
+#define IA_L2PMRESX_BASE 0x410
+#define IA_L2PMXEVFILTER_BASE 0x423
+#define IA_L2PMXEVCNTR_BASE 0x421
+
+/* Event format is -e rsRCCG; see get_event_desc() */
+
+#define EVENT_REG_MASK 0xf000
+#define EVENT_GROUPSEL_MASK 0x000f
+#define EVENT_GROUPCODE_MASK 0x0ff0
+#define EVENT_REG_SHIFT 12
+#define EVENT_GROUPCODE_SHIFT 4
+
+#define RESRX_VALUE_EN 0x80000000
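+
+/*
+ * Illustration (hypothetical config value): 0x2031 decodes in
+ * get_event_desc() as event_reg 2, event_group_code 0x03 and
+ * event_groupsel 1, so set_evres() programs byte 1 of L2PMRESR2 with
+ * 0x03 (plus RESRX_VALUE_EN) and set_evtyper() writes 1 + 4 * 2 = 9 to
+ * the counter's L2PMXEVTYPER.
+ */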
+
+static struct platform_device *l2_pmu_device;
+
+struct hw_krait_l2_pmu {
+ struct perf_event *events[MAX_KRAIT_L2_CTRS];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_KRAIT_L2_CTRS)];
+ raw_spinlock_t lock;
+};
+
+struct hw_krait_l2_pmu hw_krait_l2_pmu;
+
+struct event_desc {
+ int event_groupsel;
+ int event_reg;
+ int event_group_code;
+};
+
+void get_event_desc(u64 config, struct event_desc *evdesc)
+{
+ /* L2PMEVCNTRX */
+ evdesc->event_reg = (config & EVENT_REG_MASK) >> EVENT_REG_SHIFT;
+ /* Group code (row) */
+ evdesc->event_group_code =
+ (config & EVENT_GROUPCODE_MASK) >> EVENT_GROUPCODE_SHIFT;
+ /* Group sel (col) */
+ evdesc->event_groupsel = (config & EVENT_GROUPSEL_MASK);
+
+ pr_debug("%s: reg: %x, group_code: %x, groupsel: %x\n", __func__,
+ evdesc->event_reg, evdesc->event_group_code,
+ evdesc->event_groupsel);
+}
+
+static void set_evcntcr(int ctr)
+{
+ u32 evtcr_reg = (ctr * 16) + IA_L2PMXEVCNTCR_BASE;
+
+ set_l2_indirect_reg(evtcr_reg, 0x0);
+}
+
+static void set_evtyper(int event_groupsel, int event_reg, int ctr)
+{
+ u32 evtype_reg = (ctr * 16) + IA_L2PMXEVTYPER_BASE;
+ u32 evtype_val = event_groupsel + (4 * event_reg);
+
+ set_l2_indirect_reg(evtype_reg, evtype_val);
+}
+
+static void set_evres(int event_groupsel, int event_reg, int event_group_code)
+{
+ u32 group_reg = event_reg + IA_L2PMRESX_BASE;
+ u32 group_val =
+ RESRX_VALUE_EN | (event_group_code << (8 * event_groupsel));
+ u32 resr_val;
+ u32 group_byte = 0xff;
+ u32 group_mask = ~(group_byte << (8 * event_groupsel));
+
+ resr_val = get_l2_indirect_reg(group_reg);
+ resr_val &= group_mask;
+ resr_val |= group_val;
+
+ set_l2_indirect_reg(group_reg, resr_val);
+}
+
+static void set_evfilter(int ctr)
+{
+ u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE;
+ u32 filter_val = 0x000f0030 | 1 << smp_processor_id();
+
+ set_l2_indirect_reg(filter_reg, filter_val);
+}
+
+static void enable_intenset(u32 idx)
+{
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ set_l2_indirect_reg(L2PMINTENSET, 1 << L2CYCLE_CTR_BIT);
+ else
+ set_l2_indirect_reg(L2PMINTENSET, 1 << idx);
+}
+
+static void disable_intenclr(u32 idx)
+{
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ set_l2_indirect_reg(L2PMINTENCLR, 1 << L2CYCLE_CTR_BIT);
+ else
+ set_l2_indirect_reg(L2PMINTENCLR, 1 << idx);
+}
+
+static void enable_counter(u32 idx)
+{
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ set_l2_indirect_reg(L2PMCNTENSET, 1 << L2CYCLE_CTR_BIT);
+ else
+ set_l2_indirect_reg(L2PMCNTENSET, 1 << idx);
+}
+
+static void disable_counter(u32 idx)
+{
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ set_l2_indirect_reg(L2PMCNTENCLR, 1 << L2CYCLE_CTR_BIT);
+ else
+ set_l2_indirect_reg(L2PMCNTENCLR, 1 << idx);
+}
+
+static u64 read_counter(u32 idx)
+{
+ u32 val;
+ u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
+
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ val = get_l2_indirect_reg(L2PMCCNTR);
+ else
+ val = get_l2_indirect_reg(counter_reg);
+
+ return val;
+}
+
+static void write_counter(u32 idx, u32 val)
+{
+ u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
+
+ if (idx == L2CYCLE_CTR_EVENT_IDX)
+ set_l2_indirect_reg(L2PMCCNTR, val);
+ else
+ set_l2_indirect_reg(counter_reg, val);
+}
+
+static int
+pmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64) MAX_L2_PERIOD)
+ left = MAX_L2_PERIOD;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ write_counter(idx, (u64) (-left) & 0xffffffff);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static u64
+pmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx,
+ int overflow)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ new_raw_count &= MAX_L2_PERIOD;
+ prev_raw_count &= MAX_L2_PERIOD;
+
+ if (overflow)
+ delta = MAX_L2_PERIOD - prev_raw_count + new_raw_count;
+ else
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ pr_debug("%s: new: %lld, prev: %lld, event: %ld count: %lld\n",
+ __func__, new_raw_count, prev_raw_count,
+ hwc->config_base, local64_read(&event->count));
+
+ return new_raw_count;
+}
+
+static void krait_l2_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ pmu_event_update(event, hwc, hwc->idx, 0);
+}
+
+static void krait_l2_stop_counter(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ disable_intenclr(idx);
+ disable_counter(idx);
+
+ pmu_event_update(event, hwc, idx, 0);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+
+ pr_debug("%s: event: %ld ctr: %d stopped\n", __func__, hwc->config_base,
+ idx);
+}
+
+static void krait_l2_start_counter(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ struct event_desc evdesc;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ pmu_event_set_period(event, hwc, idx);
+
+ if (hwc->config_base == L2CYCLE_CTR_RAW_CODE)
+ goto out;
+
+ set_evcntcr(idx);
+
+ memset(&evdesc, 0, sizeof(evdesc));
+
+ get_event_desc(hwc->config_base, &evdesc);
+
+ set_evtyper(evdesc.event_groupsel, evdesc.event_reg, idx);
+
+ set_evres(evdesc.event_groupsel, evdesc.event_reg,
+ evdesc.event_group_code);
+
+ set_evfilter(idx);
+
+out:
+ enable_intenset(idx);
+ enable_counter(idx);
+
+ pr_debug
+ ("%s: ctr: %d group: %ld group_code: %lld started from cpu:%d\n",
+ __func__, idx, hwc->config_base, hwc->config, smp_processor_id());
+}
+
+static void krait_l2_del_event(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ unsigned long iflags;
+
+ raw_spin_lock_irqsave(&hw_krait_l2_pmu.lock, iflags);
+
+ clear_bit(idx, (long unsigned int *)(&hw_krait_l2_pmu.active_mask));
+
+ krait_l2_stop_counter(event, PERF_EF_UPDATE);
+ hw_krait_l2_pmu.events[idx] = NULL;
+ hwc->idx = -1;
+
+ raw_spin_unlock_irqrestore(&hw_krait_l2_pmu.lock, iflags);
+
+ pr_debug("%s: event: %ld deleted\n", __func__, hwc->config_base);
+
+ perf_event_update_userpage(event);
+}
+
+static int krait_l2_add_event(struct perf_event *event, int flags)
+{
+ int ctr = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long iflags;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ raw_spin_lock_irqsave(&hw_krait_l2_pmu.lock, iflags);
+
+ /* The cycle counter has a reserved index */
+ if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
+ if (hw_krait_l2_pmu.events[L2CYCLE_CTR_EVENT_IDX]) {
+ pr_err("%s: Stale cycle ctr event ptr !\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ hwc->idx = L2CYCLE_CTR_EVENT_IDX;
+ hw_krait_l2_pmu.events[L2CYCLE_CTR_EVENT_IDX] = event;
+ set_bit(L2CYCLE_CTR_EVENT_IDX,
+ (long unsigned int *)&hw_krait_l2_pmu.active_mask);
+ goto skip_ctr_loop;
+ }
+
+ for (ctr = 0; ctr < MAX_KRAIT_L2_CTRS - 1; ctr++) {
+ if (!hw_krait_l2_pmu.events[ctr]) {
+ hwc->idx = ctr;
+ hw_krait_l2_pmu.events[ctr] = event;
+ set_bit(ctr,
+ (long unsigned int *)
+ &hw_krait_l2_pmu.active_mask);
+ break;
+ }
+ }
+
+ if (hwc->idx < 0) {
+ err = -ENOSPC;
+ pr_err("%s: No space for event: %llx!!\n", __func__,
+ event->attr.config);
+ goto out;
+ }
+
+skip_ctr_loop:
+
+ disable_counter(hwc->idx);
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ krait_l2_start_counter(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ pr_debug("%s: event: %ld, ctr: %d added from cpu:%d\n",
+ __func__, hwc->config_base, hwc->idx, smp_processor_id());
+out:
+ raw_spin_unlock_irqrestore(&hw_krait_l2_pmu.lock, iflags);
+
+ /* Resume the PMU even if this event could not be added */
+ perf_pmu_enable(event->pmu);
+
+ return err;
+}
+
+static void krait_l2_pmu_enable(struct pmu *pmu)
+{
+ isb();
+ set_l2_indirect_reg(L2PMCR, L2PMCR_GLOBAL_ENABLE);
+}
+
+static void krait_l2_pmu_disable(struct pmu *pmu)
+{
+ set_l2_indirect_reg(L2PMCR, L2PMCR_GLOBAL_DISABLE);
+ isb();
+}
+
+u32 get_reset_pmovsr(void)
+{
+ int val;
+
+ val = get_l2_indirect_reg(L2PMOVSR);
+ /* reset it */
+ val &= 0xffffffff;
+ set_l2_indirect_reg(L2PMOVSR, val);
+
+ return val;
+}
+
+static irqreturn_t krait_l2_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmovsr;
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int bitp;
+ int idx = 0;
+
+ pmovsr = get_reset_pmovsr();
+
+ if (!(pmovsr & 0xffffffff))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ raw_spin_lock(&hw_krait_l2_pmu.lock);
+
+ while (pmovsr) {
+ bitp = __ffs(pmovsr);
+
+ if (bitp == L2CYCLE_CTR_BIT)
+ idx = L2CYCLE_CTR_EVENT_IDX;
+ else
+ idx = bitp;
+
+ event = hw_krait_l2_pmu.events[idx];
+
+ if (!event)
+ goto next;
+
+ if (!test_bit(idx, hw_krait_l2_pmu.active_mask))
+ goto next;
+
+ hwc = &event->hw;
+ pmu_event_update(event, hwc, idx, 1);
+ data.period = event->hw.last_period;
+
+ if (!pmu_event_set_period(event, hwc, idx))
+ goto next;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ disable_counter(hwc->idx);
+next:
+ pmovsr &= (pmovsr - 1);
+ }
+
+ raw_spin_unlock(&hw_krait_l2_pmu.lock);
+
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static atomic_t active_l2_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(krait_pmu_reserve_mutex);
+
+static int pmu_reserve_hardware(void)
+{
+ int i, err = -ENODEV, irq;
+
+ l2_pmu_device = reserve_pmu(ARM_PMU_DEVICE_L2);
+
+ if (IS_ERR(l2_pmu_device)) {
+ pr_warning("unable to reserve pmu\n");
+ return PTR_ERR(l2_pmu_device);
+ }
+
+ if (l2_pmu_device->num_resources < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ if (strncmp(l2_pmu_device->name, "l2-arm-pmu", 6)) {
+ pr_err("Incorrect pdev reserved !\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < l2_pmu_device->num_resources; ++i) {
+ irq = platform_get_irq(l2_pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ err = request_irq(irq, krait_l2_handle_irq,
+ IRQF_DISABLED | IRQF_NOBALANCING,
+ "krait-l2-pmu", NULL);
+ if (err) {
+ pr_warning("unable to request IRQ%d for Krait L2 perf "
+ "counters\n", irq);
+ break;
+ }
+
+ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+ }
+
+ if (err) {
+ for (i = i - 1; i >= 0; --i) {
+ irq = platform_get_irq(l2_pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+ release_pmu(l2_pmu_device);
+ l2_pmu_device = NULL;
+ }
+
+ return err;
+}
+
+static void pmu_release_hardware(void)
+{
+ int i, irq;
+
+ for (i = l2_pmu_device->num_resources - 1; i >= 0; --i) {
+ irq = platform_get_irq(l2_pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+
+ krait_l2_pmu_disable(NULL);
+
+ release_pmu(l2_pmu_device);
+ l2_pmu_device = NULL;
+}
+
+static void pmu_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock
+ (&active_l2_events, &krait_pmu_reserve_mutex)) {
+ pmu_release_hardware();
+ mutex_unlock(&krait_pmu_reserve_mutex);
+ }
+}
+
+static int krait_l2_event_init(struct perf_event *event)
+{
+ int err = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ int status = 0;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_SHARED:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ hwc->idx = -1;
+
+ event->destroy = pmu_perf_event_destroy;
+
+ if (!atomic_inc_not_zero(&active_l2_events)) {
+ /* 0 active events */
+ mutex_lock(&krait_pmu_reserve_mutex);
+ err = pmu_reserve_hardware();
+ mutex_unlock(&krait_pmu_reserve_mutex);
+ if (!err)
+ atomic_inc(&active_l2_events);
+ else
+ return err;
+ } else {
+ if (atomic_read(&active_l2_events) > (MAX_KRAIT_L2_CTRS - 1)) {
+ pr_err("%s: No space left on PMU for event: %llx\n",
+ __func__, event->attr.config);
+ atomic_dec(&active_l2_events);
+ return -ENOSPC;
+ }
+ }
+
+ hwc->config_base = event->attr.config;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /* Only one CPU can control the cycle counter */
+ if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
+ /* Check if its already running */
+ status = get_l2_indirect_reg(L2PMCCNTSR);
+ if (status == 0x2) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = MAX_L2_PERIOD;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ pr_debug("%s: event: %lld init'd\n", __func__, event->attr.config);
+
+out:
+ if (err < 0)
+ pmu_perf_event_destroy(event);
+
+ return err;
+}
+
+static struct pmu krait_l2_pmu = {
+ .pmu_enable = krait_l2_pmu_enable,
+ .pmu_disable = krait_l2_pmu_disable,
+ .event_init = krait_l2_event_init,
+ .add = krait_l2_add_event,
+ .del = krait_l2_del_event,
+ .start = krait_l2_start_counter,
+ .stop = krait_l2_stop_counter,
+ .read = krait_l2_read,
+};
+
+static const struct arm_pmu *__init krait_l2_pmu_init(void)
+{
+ /* Register our own PMU here */
+ perf_pmu_register(&krait_l2_pmu, "Krait L2", PERF_TYPE_SHARED);
+
+ memset(&hw_krait_l2_pmu, 0, sizeof(hw_krait_l2_pmu));
+
+ /* Reset all ctrs */
+ set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
+
+ /* Avoid spurious interrupt if any */
+ get_reset_pmovsr();
+
+ /* Don't return an arm_pmu here */
+ return NULL;
+}
+#else
+
+static const struct arm_pmu *__init krait_l2_pmu_init(void)
+{
+ return NULL;
+}
+#endif
diff --git a/arch/arm/kernel/perf_event_msm_l2.c b/arch/arm/kernel/perf_event_msm_l2.c
new file mode 100644
index 0000000..8678b0a
--- /dev/null
+++ b/arch/arm/kernel/perf_event_msm_l2.c
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef CONFIG_CPU_HAS_L2_PMU
+
+#include <linux/irq.h>
+
+#define MAX_BB_L2_PERIOD ((1ULL << 32) - 1)
+#define MAX_BB_L2_CTRS 5
+#define BB_L2CYCLE_CTR_BIT 31
+#define BB_L2CYCLE_CTR_EVENT_IDX 4
+#define BB_L2CYCLE_CTR_RAW_CODE 0xff
+#define SCORPIONL2_PMNC_E (1 << 0) /* Enable all counters */
+
+/*
+ * Lock to protect r/m/w sequences to the L2 PMU.
+ */
+DEFINE_RAW_SPINLOCK(bb_l2_pmu_lock);
+
+static struct platform_device *bb_l2_pmu_device;
+
+struct hw_bb_l2_pmu {
+ struct perf_event *events[MAX_BB_L2_CTRS];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_BB_L2_CTRS)];
+ raw_spinlock_t lock;
+};
+
+struct hw_bb_l2_pmu hw_bb_l2_pmu;
+
+struct bb_l2_scorp_evt {
+ u32 evt_type;
+ u32 val;
+ u8 grp;
+ u32 evt_type_act;
+};
+
+enum scorpion_perf_types {
+ SCORPIONL2_TOTAL_BANK_REQ = 0x90,
+ SCORPIONL2_DSIDE_READ = 0x91,
+ SCORPIONL2_DSIDE_WRITE = 0x92,
+ SCORPIONL2_ISIDE_READ = 0x93,
+ SCORPIONL2_L2CACHE_ISIDE_READ = 0x94,
+ SCORPIONL2_L2CACHE_BANK_REQ = 0x95,
+ SCORPIONL2_L2CACHE_DSIDE_READ = 0x96,
+ SCORPIONL2_L2CACHE_DSIDE_WRITE = 0x97,
+ SCORPIONL2_L2NOCACHE_DSIDE_WRITE = 0x98,
+ SCORPIONL2_L2NOCACHE_ISIDE_READ = 0x99,
+ SCORPIONL2_L2NOCACHE_TOTAL_REQ = 0x9a,
+ SCORPIONL2_L2NOCACHE_DSIDE_READ = 0x9b,
+ SCORPIONL2_DSIDE_READ_NOL1 = 0x9c,
+ SCORPIONL2_L2CACHE_WRITETHROUGH = 0x9d,
+ SCORPIONL2_BARRIERS = 0x9e,
+ SCORPIONL2_HARDWARE_TABLE_WALKS = 0x9f,
+ SCORPIONL2_MVA_POC = 0xa0,
+ SCORPIONL2_L2CACHE_HW_TABLE_WALKS = 0xa1,
+ SCORPIONL2_SETWAY_CACHE_OPS = 0xa2,
+ SCORPIONL2_DSIDE_WRITE_HITS = 0xa3,
+ SCORPIONL2_ISIDE_READ_HITS = 0xa4,
+ SCORPIONL2_CACHE_DSIDE_READ_NOL1 = 0xa5,
+ SCORPIONL2_TOTAL_CACHE_HITS = 0xa6,
+ SCORPIONL2_CACHE_MATCH_MISS = 0xa7,
+ SCORPIONL2_DREAD_HIT_L1_DATA = 0xa8,
+ SCORPIONL2_L2LINE_LOCKED = 0xa9,
+ SCORPIONL2_HW_TABLE_WALK_HIT = 0xaa,
+ SCORPIONL2_CACHE_MVA_POC = 0xab,
+ SCORPIONL2_L2ALLOC_DWRITE_MISS = 0xac,
+ SCORPIONL2_CORRECTED_TAG_ARRAY = 0xad,
+ SCORPIONL2_CORRECTED_DATA_ARRAY = 0xae,
+ SCORPIONL2_CORRECTED_REPLACEMENT_ARRAY = 0xaf,
+ SCORPIONL2_PMBUS_MPAAF = 0xb0,
+ SCORPIONL2_PMBUS_MPWDAF = 0xb1,
+ SCORPIONL2_PMBUS_MPBRT = 0xb2,
+ SCORPIONL2_CPU0_GRANT = 0xb3,
+ SCORPIONL2_CPU1_GRANT = 0xb4,
+ SCORPIONL2_CPU0_NOGRANT = 0xb5,
+ SCORPIONL2_CPU1_NOGRANT = 0xb6,
+ SCORPIONL2_CPU0_LOSING_ARB = 0xb7,
+ SCORPIONL2_CPU1_LOSING_ARB = 0xb8,
+ SCORPIONL2_SLAVEPORT_NOGRANT = 0xb9,
+ SCORPIONL2_SLAVEPORT_BPQ_FULL = 0xba,
+ SCORPIONL2_SLAVEPORT_LOSING_ARB = 0xbb,
+ SCORPIONL2_SLAVEPORT_GRANT = 0xbc,
+ SCORPIONL2_SLAVEPORT_GRANTLOCK = 0xbd,
+ SCORPIONL2_L2EM_STREX_PASS = 0xbe,
+ SCORPIONL2_L2EM_STREX_FAIL = 0xbf,
+ SCORPIONL2_LDREX_RESERVE_L2EM = 0xc0,
+ SCORPIONL2_SLAVEPORT_LDREX = 0xc1,
+ SCORPIONL2_CPU0_L2EM_CLEARED = 0xc2,
+ SCORPIONL2_CPU1_L2EM_CLEARED = 0xc3,
+ SCORPIONL2_SLAVEPORT_L2EM_CLEARED = 0xc4,
+ SCORPIONL2_CPU0_CLAMPED = 0xc5,
+ SCORPIONL2_CPU1_CLAMPED = 0xc6,
+ SCORPIONL2_CPU0_WAIT = 0xc7,
+ SCORPIONL2_CPU1_WAIT = 0xc8,
+ SCORPIONL2_CPU0_NONAMBAS_WAIT = 0xc9,
+ SCORPIONL2_CPU1_NONAMBAS_WAIT = 0xca,
+ SCORPIONL2_CPU0_DSB_WAIT = 0xcb,
+ SCORPIONL2_CPU1_DSB_WAIT = 0xcc,
+ SCORPIONL2_AXI_READ = 0xcd,
+ SCORPIONL2_AXI_WRITE = 0xce,
+
+ SCORPIONL2_1BEAT_WRITE = 0xcf,
+ SCORPIONL2_2BEAT_WRITE = 0xd0,
+ SCORPIONL2_4BEAT_WRITE = 0xd1,
+ SCORPIONL2_8BEAT_WRITE = 0xd2,
+ SCORPIONL2_12BEAT_WRITE = 0xd3,
+ SCORPIONL2_16BEAT_WRITE = 0xd4,
+ SCORPIONL2_1BEAT_DSIDE_READ = 0xd5,
+ SCORPIONL2_2BEAT_DSIDE_READ = 0xd6,
+ SCORPIONL2_4BEAT_DSIDE_READ = 0xd7,
+ SCORPIONL2_8BEAT_DSIDE_READ = 0xd8,
+ SCORPIONL2_CSYS_READ_1BEAT = 0xd9,
+ SCORPIONL2_CSYS_READ_2BEAT = 0xda,
+ SCORPIONL2_CSYS_READ_4BEAT = 0xdb,
+ SCORPIONL2_CSYS_READ_8BEAT = 0xdc,
+ SCORPIONL2_4BEAT_IFETCH_READ = 0xdd,
+ SCORPIONL2_8BEAT_IFETCH_READ = 0xde,
+ SCORPIONL2_CSYS_WRITE_1BEAT = 0xdf,
+ SCORPIONL2_CSYS_WRITE_2BEAT = 0xe0,
+ SCORPIONL2_AXI_READ_DATA_BEAT = 0xe1,
+ SCORPIONL2_AXI_WRITE_EVT1 = 0xe2,
+ SCORPIONL2_AXI_WRITE_EVT2 = 0xe3,
+ SCORPIONL2_LDREX_REQ = 0xe4,
+ SCORPIONL2_STREX_PASS = 0xe5,
+ SCORPIONL2_STREX_FAIL = 0xe6,
+ SCORPIONL2_CPREAD = 0xe7,
+ SCORPIONL2_CPWRITE = 0xe8,
+ SCORPIONL2_BARRIER_REQ = 0xe9,
+ SCORPIONL2_AXI_READ_SLVPORT = 0xea,
+ SCORPIONL2_AXI_WRITE_SLVPORT = 0xeb,
+ SCORPIONL2_AXI_READ_SLVPORT_DATABEAT = 0xec,
+ SCORPIONL2_AXI_WRITE_SLVPORT_DATABEAT = 0xed,
+ SCORPIONL2_SNOOPKILL_PREFILTER = 0xee,
+ SCORPIONL2_SNOOPKILL_FILTEROUT = 0xef,
+ SCORPIONL2_SNOOPED_IC = 0xf0,
+ SCORPIONL2_SNOOPED_BP = 0xf1,
+ SCORPIONL2_SNOOPED_BARRIERS = 0xf2,
+ SCORPIONL2_SNOOPED_TLB = 0xf3,
+ BB_L2_MAX_EVT,
+};
+
+static const struct bb_l2_scorp_evt sc_evt[] = {
+ {SCORPIONL2_TOTAL_BANK_REQ, 0x80000001, 0, 0x00},
+ {SCORPIONL2_DSIDE_READ, 0x80000100, 0, 0x01},
+ {SCORPIONL2_DSIDE_WRITE, 0x80010000, 0, 0x02},
+ {SCORPIONL2_ISIDE_READ, 0x81000000, 0, 0x03},
+ {SCORPIONL2_L2CACHE_ISIDE_READ, 0x80000002, 0, 0x00},
+ {SCORPIONL2_L2CACHE_BANK_REQ, 0x80000200, 0, 0x01},
+ {SCORPIONL2_L2CACHE_DSIDE_READ, 0x80020000, 0, 0x02},
+ {SCORPIONL2_L2CACHE_DSIDE_WRITE, 0x82000000, 0, 0x03},
+ {SCORPIONL2_L2NOCACHE_DSIDE_WRITE, 0x80000003, 0, 0x00},
+ {SCORPIONL2_L2NOCACHE_ISIDE_READ, 0x80000300, 0, 0x01},
+ {SCORPIONL2_L2NOCACHE_TOTAL_REQ, 0x80030000, 0, 0x02},
+ {SCORPIONL2_L2NOCACHE_DSIDE_READ, 0x83000000, 0, 0x03},
+ {SCORPIONL2_DSIDE_READ_NOL1, 0x80000004, 0, 0x00},
+ {SCORPIONL2_L2CACHE_WRITETHROUGH, 0x80000400, 0, 0x01},
+ {SCORPIONL2_BARRIERS, 0x84000000, 0, 0x03},
+ {SCORPIONL2_HARDWARE_TABLE_WALKS, 0x80000005, 0, 0x00},
+ {SCORPIONL2_MVA_POC, 0x80000500, 0, 0x01},
+ {SCORPIONL2_L2CACHE_HW_TABLE_WALKS, 0x80050000, 0, 0x02},
+ {SCORPIONL2_SETWAY_CACHE_OPS, 0x85000000, 0, 0x03},
+ {SCORPIONL2_DSIDE_WRITE_HITS, 0x80000006, 0, 0x00},
+ {SCORPIONL2_ISIDE_READ_HITS, 0x80000600, 0, 0x01},
+ {SCORPIONL2_CACHE_DSIDE_READ_NOL1, 0x80060000, 0, 0x02},
+ {SCORPIONL2_TOTAL_CACHE_HITS, 0x86000000, 0, 0x03},
+ {SCORPIONL2_CACHE_MATCH_MISS, 0x80000007, 0, 0x00},
+ {SCORPIONL2_DREAD_HIT_L1_DATA, 0x87000000, 0, 0x03},
+ {SCORPIONL2_L2LINE_LOCKED, 0x80000008, 0, 0x00},
+ {SCORPIONL2_HW_TABLE_WALK_HIT, 0x80000800, 0, 0x01},
+ {SCORPIONL2_CACHE_MVA_POC, 0x80080000, 0, 0x02},
+ {SCORPIONL2_L2ALLOC_DWRITE_MISS, 0x88000000, 0, 0x03},
+ {SCORPIONL2_CORRECTED_TAG_ARRAY, 0x80001A00, 0, 0x01},
+ {SCORPIONL2_CORRECTED_DATA_ARRAY, 0x801A0000, 0, 0x02},
+ {SCORPIONL2_CORRECTED_REPLACEMENT_ARRAY, 0x9A000000, 0, 0x03},
+ {SCORPIONL2_PMBUS_MPAAF, 0x80001C00, 0, 0x01},
+ {SCORPIONL2_PMBUS_MPWDAF, 0x801C0000, 0, 0x02},
+ {SCORPIONL2_PMBUS_MPBRT, 0x9C000000, 0, 0x03},
+
+ {SCORPIONL2_CPU0_GRANT, 0x80000001, 1, 0x04},
+ {SCORPIONL2_CPU1_GRANT, 0x80000100, 1, 0x05},
+ {SCORPIONL2_CPU0_NOGRANT, 0x80020000, 1, 0x06},
+ {SCORPIONL2_CPU1_NOGRANT, 0x82000000, 1, 0x07},
+ {SCORPIONL2_CPU0_LOSING_ARB, 0x80040000, 1, 0x06},
+ {SCORPIONL2_CPU1_LOSING_ARB, 0x84000000, 1, 0x07},
+ {SCORPIONL2_SLAVEPORT_NOGRANT, 0x80000007, 1, 0x04},
+ {SCORPIONL2_SLAVEPORT_BPQ_FULL, 0x80000700, 1, 0x05},
+ {SCORPIONL2_SLAVEPORT_LOSING_ARB, 0x80070000, 1, 0x06},
+ {SCORPIONL2_SLAVEPORT_GRANT, 0x87000000, 1, 0x07},
+ {SCORPIONL2_SLAVEPORT_GRANTLOCK, 0x80000008, 1, 0x04},
+ {SCORPIONL2_L2EM_STREX_PASS, 0x80000009, 1, 0x04},
+ {SCORPIONL2_L2EM_STREX_FAIL, 0x80000900, 1, 0x05},
+ {SCORPIONL2_LDREX_RESERVE_L2EM, 0x80090000, 1, 0x06},
+ {SCORPIONL2_SLAVEPORT_LDREX, 0x89000000, 1, 0x07},
+ {SCORPIONL2_CPU0_L2EM_CLEARED, 0x800A0000, 1, 0x06},
+ {SCORPIONL2_CPU1_L2EM_CLEARED, 0x8A000000, 1, 0x07},
+ {SCORPIONL2_SLAVEPORT_L2EM_CLEARED, 0x80000B00, 1, 0x05},
+ {SCORPIONL2_CPU0_CLAMPED, 0x8000000E, 1, 0x04},
+ {SCORPIONL2_CPU1_CLAMPED, 0x80000E00, 1, 0x05},
+ {SCORPIONL2_CPU0_WAIT, 0x800F0000, 1, 0x06},
+ {SCORPIONL2_CPU1_WAIT, 0x8F000000, 1, 0x07},
+ {SCORPIONL2_CPU0_NONAMBAS_WAIT, 0x80000010, 1, 0x04},
+ {SCORPIONL2_CPU1_NONAMBAS_WAIT, 0x80001000, 1, 0x05},
+ {SCORPIONL2_CPU0_DSB_WAIT, 0x80000014, 1, 0x04},
+ {SCORPIONL2_CPU1_DSB_WAIT, 0x80001400, 1, 0x05},
+
+ {SCORPIONL2_AXI_READ, 0x80000001, 2, 0x08},
+ {SCORPIONL2_AXI_WRITE, 0x80000100, 2, 0x09},
+ {SCORPIONL2_1BEAT_WRITE, 0x80010000, 2, 0x0a},
+ {SCORPIONL2_2BEAT_WRITE, 0x80010000, 2, 0x0b},
+ {SCORPIONL2_4BEAT_WRITE, 0x80000002, 2, 0x08},
+ {SCORPIONL2_8BEAT_WRITE, 0x80000200, 2, 0x09},
+ {SCORPIONL2_12BEAT_WRITE, 0x80020000, 2, 0x0a},
+ {SCORPIONL2_16BEAT_WRITE, 0x82000000, 2, 0x0b},
+ {SCORPIONL2_1BEAT_DSIDE_READ, 0x80000003, 2, 0x08},
+ {SCORPIONL2_2BEAT_DSIDE_READ, 0x80000300, 2, 0x09},
+ {SCORPIONL2_4BEAT_DSIDE_READ, 0x80030000, 2, 0x0a},
+ {SCORPIONL2_8BEAT_DSIDE_READ, 0x83000000, 2, 0x0b},
+ {SCORPIONL2_CSYS_READ_1BEAT, 0x80000004, 2, 0x08},
+ {SCORPIONL2_CSYS_READ_2BEAT, 0x80000400, 2, 0x09},
+ {SCORPIONL2_CSYS_READ_4BEAT, 0x80040000, 2, 0x0a},
+ {SCORPIONL2_CSYS_READ_8BEAT, 0x84000000, 2, 0x0b},
+ {SCORPIONL2_4BEAT_IFETCH_READ, 0x80000005, 2, 0x08},
+ {SCORPIONL2_8BEAT_IFETCH_READ, 0x80000500, 2, 0x09},
+ {SCORPIONL2_CSYS_WRITE_1BEAT, 0x80050000, 2, 0x0a},
+ {SCORPIONL2_CSYS_WRITE_2BEAT, 0x85000000, 2, 0x0b},
+ {SCORPIONL2_AXI_READ_DATA_BEAT, 0x80000600, 2, 0x09},
+ {SCORPIONL2_AXI_WRITE_EVT1, 0x80060000, 2, 0x0a},
+ {SCORPIONL2_AXI_WRITE_EVT2, 0x86000000, 2, 0x0b},
+ {SCORPIONL2_LDREX_REQ, 0x80000007, 2, 0x08},
+ {SCORPIONL2_STREX_PASS, 0x80000700, 2, 0x09},
+ {SCORPIONL2_STREX_FAIL, 0x80070000, 2, 0x0a},
+ {SCORPIONL2_CPREAD, 0x80000008, 2, 0x08},
+ {SCORPIONL2_CPWRITE, 0x80000800, 2, 0x09},
+ {SCORPIONL2_BARRIER_REQ, 0x88000000, 2, 0x0b},
+
+ {SCORPIONL2_AXI_READ_SLVPORT, 0x80000001, 3, 0x0c},
+ {SCORPIONL2_AXI_WRITE_SLVPORT, 0x80000100, 3, 0x0d},
+ {SCORPIONL2_AXI_READ_SLVPORT_DATABEAT, 0x80010000, 3, 0x0e},
+ {SCORPIONL2_AXI_WRITE_SLVPORT_DATABEAT, 0x81000000, 3, 0x0f},
+
+ {SCORPIONL2_SNOOPKILL_PREFILTER, 0x80000001, 4, 0x10},
+ {SCORPIONL2_SNOOPKILL_FILTEROUT, 0x80000100, 4, 0x11},
+ {SCORPIONL2_SNOOPED_IC, 0x80000002, 4, 0x10},
+ {SCORPIONL2_SNOOPED_BP, 0x80000200, 4, 0x11},
+ {SCORPIONL2_SNOOPED_BARRIERS, 0x80020000, 4, 0x12},
+ {SCORPIONL2_SNOOPED_TLB, 0x82000000, 4, 0x13},
+};
+
+static u32 bb_l2_read_l2pm0(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c7, 0" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_write_l2pm0(u32 val)
+{
+ asm volatile ("mcr p15, 3, %0, c15, c7, 0" : : "r" (val));
+}
+
+static u32 bb_l2_read_l2pm1(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c7, 1" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_write_l2pm1(u32 val)
+{
+ asm volatile ("mcr p15, 3, %0, c15, c7, 1" : : "r" (val));
+}
+
+static u32 bb_l2_read_l2pm2(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c7, 2" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_write_l2pm2(u32 val)
+{
+ asm volatile ("mcr p15, 3, %0, c15, c7, 2" : : "r" (val));
+}
+
+static u32 bb_l2_read_l2pm3(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c7, 3" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_write_l2pm3(u32 val)
+{
+ asm volatile ("mcr p15, 3, %0, c15, c7, 3" : : "r" (val));
+}
+
+static u32 bb_l2_read_l2pm4(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c7, 4" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_write_l2pm4(u32 val)
+{
+ asm volatile ("mcr p15, 3, %0, c15, c7, 4" : : "r" (val));
+}
+
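+/*
+ * Each of the five L2PM group registers packs four 8-bit event columns.
+ * bb_l2_evt_setup() read-modify-writes only the column selected by the
+ * event's value, using bb_l2_get_columnmask() to preserve the other
+ * columns in that group register.
+ */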
+struct bb_scorpion_access_funcs {
+ u32(*read) (void);
+ void (*write) (u32);
+ void (*pre) (void);
+ void (*post) (void);
+};
+
+struct bb_scorpion_access_funcs bb_l2_func[] = {
+ {bb_l2_read_l2pm0, bb_l2_write_l2pm0, NULL, NULL},
+ {bb_l2_read_l2pm1, bb_l2_write_l2pm1, NULL, NULL},
+ {bb_l2_read_l2pm2, bb_l2_write_l2pm2, NULL, NULL},
+ {bb_l2_read_l2pm3, bb_l2_write_l2pm3, NULL, NULL},
+ {bb_l2_read_l2pm4, bb_l2_write_l2pm4, NULL, NULL},
+};
+
+#define COLMN0MASK 0x000000ff
+#define COLMN1MASK 0x0000ff00
+#define COLMN2MASK 0x00ff0000
+
+static u32 bb_l2_get_columnmask(u32 setval)
+{
+ if (setval & COLMN0MASK)
+ return 0xffffff00;
+ else if (setval & COLMN1MASK)
+ return 0xffff00ff;
+ else if (setval & COLMN2MASK)
+ return 0xff00ffff;
+ else
+ return 0x80ffffff;
+}
+
+static void bb_l2_evt_setup(u32 gr, u32 setval)
+{
+ u32 val;
+ if (bb_l2_func[gr].pre)
+ bb_l2_func[gr].pre();
+ val = bb_l2_get_columnmask(setval) & bb_l2_func[gr].read();
+ val = val | setval;
+ bb_l2_func[gr].write(val);
+ if (bb_l2_func[gr].post)
+ bb_l2_func[gr].post();
+}
+
+#define BB_L2_EVT_START_IDX 0x90
+#define BB_L2_INV_EVTYPE 0
+
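+/*
+ * sc_evt[] is indexed directly by (evt_type - BB_L2_EVT_START_IDX), so its
+ * entries must remain in the same order as enum scorpion_perf_types; the
+ * evt_type comparison below catches any mismatch.
+ */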
+static unsigned int get_bb_l2_evtinfo(unsigned int evt_type,
+ struct bb_l2_scorp_evt *evtinfo)
+{
+ u32 idx;
+ if (evt_type < BB_L2_EVT_START_IDX || evt_type >= BB_L2_MAX_EVT)
+ return BB_L2_INV_EVTYPE;
+ idx = evt_type - BB_L2_EVT_START_IDX;
+ if (sc_evt[idx].evt_type == evt_type) {
+ evtinfo->val = sc_evt[idx].val;
+ evtinfo->grp = sc_evt[idx].grp;
+ evtinfo->evt_type_act = sc_evt[idx].evt_type_act;
+ return sc_evt[idx].evt_type_act;
+ }
+ return BB_L2_INV_EVTYPE;
+}
+
+static inline void bb_l2_pmnc_write(unsigned long val)
+{
+ val &= 0xff;
+ asm volatile ("mcr p15, 3, %0, c15, c4, 0" : : "r" (val));
+}
+
+static inline unsigned long bb_l2_pmnc_read(void)
+{
+ u32 val;
+ asm volatile ("mrc p15, 3, %0, c15, c4, 0" : "=r" (val));
+ return val;
+}
+
+static void bb_l2_set_evcntcr(void)
+{
+ u32 val = 0x0;
+ asm volatile ("mcr p15, 3, %0, c15, c6, 4" : : "r" (val));
+}
+
+static inline void bb_l2_set_evtyper(int ctr, int val)
+{
+ /* select ctr */
+ asm volatile ("mcr p15, 3, %0, c15, c6, 0" : : "r" (ctr));
+
+ /* write into EVTYPER */
+ asm volatile ("mcr p15, 3, %0, c15, c6, 7" : : "r" (val));
+}
+
+static void bb_l2_set_evfilter(void)
+{
+ u32 filter_val = 0x000f0030 | 1 << smp_processor_id();
+
+ asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val));
+}
+
+static void bb_l2_enable_intenset(u32 idx)
+{
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r"
+ (1 << BB_L2CYCLE_CTR_BIT));
+ } else {
+ asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r" (1 << idx));
+ }
+}
+
+static void bb_l2_disable_intenclr(u32 idx)
+{
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r"
+ (1 << BB_L2CYCLE_CTR_BIT));
+ } else {
+ asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r" (1 << idx));
+ }
+}
+
+static void bb_l2_enable_counter(u32 idx)
+{
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r"
+ (1 << BB_L2CYCLE_CTR_BIT));
+ } else {
+ asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r" (1 << idx));
+ }
+}
+
+static void bb_l2_disable_counter(u32 idx)
+{
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r"
+ (1 << BB_L2CYCLE_CTR_BIT));
+
+ } else {
+ asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r" (1 << idx));
+ }
+}
+
+static u64 bb_l2_read_counter(u32 idx)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mrc p15, 3, %0, c15, c4, 5" : "=r" (val));
+ } else {
+ raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags);
+ asm volatile ("mcr p15, 3, %0, c15, c6, 0" : : "r" (idx));
+
+ /* read val from counter */
+ asm volatile ("mrc p15, 3, %0, c15, c6, 5" : "=r" (val));
+ raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags);
+ }
+
+ return val;
+}
+
+static void bb_l2_write_counter(u32 idx, u32 val)
+{
+ unsigned long flags;
+
+ if (idx == BB_L2CYCLE_CTR_EVENT_IDX) {
+ asm volatile ("mcr p15, 3, %0, c15, c4, 5" : : "r" (val));
+ } else {
+ raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags);
+ /* select counter */
+ asm volatile ("mcr p15, 3, %0, c15, c6, 0" : : "r" (idx));
+
+ /* write val into counter */
+ asm volatile ("mcr p15, 3, %0, c15, c6, 5" : : "r" (val));
+ raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags);
+ }
+}
+
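+/*
+ * Standard perf sampling setup: program the counter with the negated number
+ * of events left in the period so it overflows after that many events,
+ * capped at MAX_BB_L2_PERIOD (the 32-bit counter width).
+ */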
+static int
+bb_pmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64) MAX_BB_L2_PERIOD)
+ left = MAX_BB_L2_PERIOD;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ bb_l2_write_counter(idx, (u64) (-left) & 0xffffffff);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
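+/*
+ * Fold the hardware counter into event->count.  The cmpxchg loop retries if
+ * prev_count changed underneath us; when called for an overflow, the delta
+ * accounts for the wrap past MAX_BB_L2_PERIOD.
+ */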
+static u64
+bb_pmu_event_update(struct perf_event *event, struct hw_perf_event *hwc,
+ int idx, int overflow)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = bb_l2_read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ new_raw_count &= MAX_BB_L2_PERIOD;
+ prev_raw_count &= MAX_BB_L2_PERIOD;
+
+ if (overflow) {
+ delta = MAX_BB_L2_PERIOD - prev_raw_count + new_raw_count;
+ pr_err("%s: delta: %lld\n", __func__, delta);
+ } else
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ pr_debug("%s: new: %lld, prev: %lld, event: %ld count: %lld\n",
+ __func__, new_raw_count, prev_raw_count,
+ hwc->config_base, local64_read(&event->count));
+
+ return new_raw_count;
+}
+
+static void bb_l2_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ bb_pmu_event_update(event, hwc, hwc->idx, 0);
+}
+
+static void bb_l2_stop_counter(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ bb_l2_disable_intenclr(idx);
+ bb_l2_disable_counter(idx);
+
+ bb_pmu_event_update(event, hwc, idx, 0);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+
+ pr_debug("%s: event: %ld ctr: %d stopped\n", __func__, hwc->config_base,
+ idx);
+}
+
+static void bb_l2_start_counter(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ struct bb_l2_scorp_evt evtinfo;
+ int evtype = hwc->config_base;
+ int ev_typer;
+ unsigned long iflags;
+ int cpu_id = smp_processor_id();
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ bb_pmu_event_set_period(event, hwc, idx);
+
+ memset(&evtinfo, 0, sizeof(evtinfo));
+
+ if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE)
+ goto out;
+
+ ev_typer = get_bb_l2_evtinfo(evtype, &evtinfo);
+
+ raw_spin_lock_irqsave(&bb_l2_pmu_lock, iflags);
+
+ bb_l2_set_evtyper(idx, ev_typer);
+
+ bb_l2_set_evcntcr();
+
+ bb_l2_set_evfilter();
+
+ bb_l2_evt_setup(evtinfo.grp, evtinfo.val);
+
+ raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, iflags);
+
+out:
+
+ bb_l2_enable_intenset(idx);
+
+ bb_l2_enable_counter(idx);
+
+ pr_debug("%s: idx: %d, event: %d, val: %x, cpu: %d\n",
+ __func__, idx, evtype, evtinfo.val, cpu_id);
+}
+
+static void bb_l2_del_event(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ unsigned long iflags;
+
+ raw_spin_lock_irqsave(&hw_bb_l2_pmu.lock, iflags);
+
+ clear_bit(idx, (long unsigned int *)(&hw_bb_l2_pmu.active_mask));
+
+ bb_l2_stop_counter(event, PERF_EF_UPDATE);
+ hw_bb_l2_pmu.events[idx] = NULL;
+ hwc->idx = -1;
+
+ raw_spin_unlock_irqrestore(&hw_bb_l2_pmu.lock, iflags);
+
+ pr_debug("%s: event: %ld deleted\n", __func__, hwc->config_base);
+
+ perf_event_update_userpage(event);
+}
+
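+/*
+ * The cycle counter always uses the dedicated BB_L2CYCLE_CTR_EVENT_IDX slot;
+ * every other event takes the first free configurable counter.  The counter
+ * is left disabled unless PERF_EF_START is set.
+ */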
+static int bb_l2_add_event(struct perf_event *event, int flags)
+{
+ int ctr = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long iflags;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ raw_spin_lock_irqsave(&hw_bb_l2_pmu.lock, iflags);
+
+ /* Cycle counter has a reserved index */
+ if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE) {
+ if (hw_bb_l2_pmu.events[BB_L2CYCLE_CTR_EVENT_IDX]) {
+ pr_err("%s: Stale cycle ctr event ptr !\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ hwc->idx = BB_L2CYCLE_CTR_EVENT_IDX;
+ hw_bb_l2_pmu.events[BB_L2CYCLE_CTR_EVENT_IDX] = event;
+ set_bit(BB_L2CYCLE_CTR_EVENT_IDX,
+ (long unsigned int *)&hw_bb_l2_pmu.active_mask);
+ goto skip_ctr_loop;
+ }
+
+ for (ctr = 0; ctr < MAX_BB_L2_CTRS - 1; ctr++) {
+ if (!hw_bb_l2_pmu.events[ctr]) {
+ hwc->idx = ctr;
+ hw_bb_l2_pmu.events[ctr] = event;
+ set_bit(ctr, (long unsigned int *)
+ &hw_bb_l2_pmu.active_mask);
+ break;
+ }
+ }
+
+ if (hwc->idx < 0) {
+ err = -ENOSPC;
+ pr_err("%s: No space for event: %llx!!\n", __func__,
+ event->attr.config);
+ goto out;
+ }
+
+skip_ctr_loop:
+
+ bb_l2_disable_counter(hwc->idx);
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ bb_l2_start_counter(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ pr_debug("%s: event: %ld, ctr: %d added from cpu:%d\n",
+ __func__, hwc->config_base, hwc->idx, smp_processor_id());
+out:
+ raw_spin_unlock_irqrestore(&hw_bb_l2_pmu.lock, iflags);
+
+ /* Resume the PMU even if this event could not be added */
+ perf_pmu_enable(event->pmu);
+
+ return err;
+}
+
+static void bb_l2_pmu_enable(struct pmu *pmu)
+{
+ unsigned long flags;
+ isb();
+ raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags);
+ /* Enable all counters */
+ bb_l2_pmnc_write(bb_l2_pmnc_read() | SCORPIONL2_PMNC_E);
+ raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags);
+}
+
+static void bb_l2_pmu_disable(struct pmu *pmu)
+{
+ unsigned long flags;
+ raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags);
+ /* Disable all counters */
+ bb_l2_pmnc_write(bb_l2_pmnc_read() & ~SCORPIONL2_PMNC_E);
+ raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags);
+ isb();
+}
+
+static inline u32 bb_l2_get_reset_pmovsr(void)
+{
+ u32 val;
+
+ /* Read */
+ asm volatile ("mrc p15, 3, %0, c15, c4, 1" : "=r" (val));
+
+ /* Write to clear flags */
+ val &= 0xffffffff;
+ asm volatile ("mcr p15, 3, %0, c15, c4, 1" : : "r" (val));
+
+ return val;
+}
+
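+/*
+ * Overflow interrupt handler: read-and-clear the overflow status, then for
+ * each set bit (bit 31 maps to the cycle counter index) update the event,
+ * re-arm its sampling period and let perf handle the overflow.
+ */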
+static irqreturn_t bb_l2_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmovsr;
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int bitp;
+ int idx = 0;
+
+ pmovsr = bb_l2_get_reset_pmovsr();
+
+ if (!(pmovsr & 0xffffffff))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ raw_spin_lock(&hw_bb_l2_pmu.lock);
+
+ while (pmovsr) {
+ bitp = __ffs(pmovsr);
+
+ if (bitp == BB_L2CYCLE_CTR_BIT)
+ idx = BB_L2CYCLE_CTR_EVENT_IDX;
+ else
+ idx = bitp;
+
+ event = hw_bb_l2_pmu.events[idx];
+
+ if (!event)
+ goto next;
+
+ if (!test_bit(idx, hw_bb_l2_pmu.active_mask))
+ goto next;
+
+ hwc = &event->hw;
+ bb_pmu_event_update(event, hwc, idx, 1);
+ data.period = event->hw.last_period;
+
+ if (!bb_pmu_event_set_period(event, hwc, idx))
+ goto next;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ bb_l2_disable_counter(hwc->idx);
+next:
+ pmovsr &= (pmovsr - 1);
+ }
+
+ raw_spin_unlock(&hw_bb_l2_pmu.lock);
+
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static atomic_t active_bb_l2_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(bb_pmu_reserve_mutex);
+
+static int bb_pmu_reserve_hardware(void)
+{
+ int i, err = -ENODEV, irq;
+
+ bb_l2_pmu_device = reserve_pmu(ARM_PMU_DEVICE_L2);
+
+ if (IS_ERR(bb_l2_pmu_device)) {
+ pr_warning("unable to reserve pmu\n");
+ return PTR_ERR(bb_l2_pmu_device);
+ }
+
+ if (bb_l2_pmu_device->num_resources < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ if (strncmp(bb_l2_pmu_device->name, "l2-arm-pmu", 6)) {
+ pr_err("Incorrect pdev reserved !\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < bb_l2_pmu_device->num_resources; ++i) {
+ irq = platform_get_irq(bb_l2_pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ err = request_irq(irq, bb_l2_handle_irq,
+ IRQF_DISABLED | IRQF_NOBALANCING,
+ "bb-l2-pmu", NULL);
+ if (err) {
+ pr_warning("unable to request IRQ%d for Krait L2 perf "
+ "counters\n", irq);
+ break;
+ }
+
+ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+ }
+
+ if (err) {
+ for (i = i - 1; i >= 0; --i) {
+ irq = platform_get_irq(bb_l2_pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+ release_pmu(bb_l2_pmu_device);
+ bb_l2_pmu_device = NULL;
+ }
+
+ return err;
+}
+
+static void bb_pmu_release_hardware(void)
+{
+ int i, irq;
+
+ for (i = bb_l2_pmu_device->num_resources - 1; i >= 0; --i) {
+ irq = platform_get_irq(bb_l2_pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+
+ bb_l2_pmu_disable(NULL);
+
+ release_pmu(bb_l2_pmu_device);
+ bb_l2_pmu_device = NULL;
+}
+
+static void bb_pmu_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock
+ (&active_bb_l2_events, &bb_pmu_reserve_mutex)) {
+ bb_pmu_release_hardware();
+ mutex_unlock(&bb_pmu_reserve_mutex);
+ }
+}
+
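+/*
+ * Events are requested with attr.type == PERF_TYPE_SHARED and the raw L2
+ * event code in the low byte of attr.config (0xff selects the cycle
+ * counter).  The first active event reserves the PMU device and its IRQs.
+ */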
+static int bb_l2_event_init(struct perf_event *event)
+{
+ int err = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ int status = 0;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_SHARED:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ hwc->idx = -1;
+
+ event->destroy = bb_pmu_perf_event_destroy;
+
+ if (!atomic_inc_not_zero(&active_bb_l2_events)) {
+ /* 0 active events */
+ mutex_lock(&bb_pmu_reserve_mutex);
+ err = bb_pmu_reserve_hardware();
+ mutex_unlock(&bb_pmu_reserve_mutex);
+ if (!err)
+ atomic_inc(&active_bb_l2_events);
+ else
+ return err;
+ } else {
+ if (atomic_read(&active_bb_l2_events) > (MAX_BB_L2_CTRS - 1)) {
+ pr_err("%s: No space left on PMU for event: %llx\n",
+ __func__, event->attr.config);
+ atomic_dec(&active_bb_l2_events);
+ return -ENOSPC;
+ }
+ }
+
+ hwc->config_base = event->attr.config & 0xff;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /* Only one CPU can control the cycle counter */
+ if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE) {
+ /* Check if its already running */
+ asm volatile ("mrc p15, 3, %0, c15, c4, 6" : "=r" (status));
+ if (status == 0x2) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = MAX_BB_L2_PERIOD;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ pr_debug("%s: event: %lld init'd\n", __func__, event->attr.config);
+
+out:
+ if (err < 0)
+ bb_pmu_perf_event_destroy(event);
+
+ return err;
+}
+
+static struct pmu bb_l2_pmu = {
+ .pmu_enable = bb_l2_pmu_enable,
+ .pmu_disable = bb_l2_pmu_disable,
+ .event_init = bb_l2_event_init,
+ .add = bb_l2_add_event,
+ .del = bb_l2_del_event,
+ .start = bb_l2_start_counter,
+ .stop = bb_l2_stop_counter,
+ .read = bb_l2_read,
+};
+
+static const struct arm_pmu *__init scorpionmp_l2_pmu_init(void)
+{
+ /* Register our own PMU here */
+ perf_pmu_register(&bb_l2_pmu, "BB L2", PERF_TYPE_SHARED);
+
+ memset(&hw_bb_l2_pmu, 0, sizeof(hw_bb_l2_pmu));
+
+ /* Avoid spurious interrupts at startup */
+ bb_l2_get_reset_pmovsr();
+
+ /* Don't return an arm_pmu here */
+ return NULL;
+}
+#else
+
+static const struct arm_pmu *__init scorpionmp_l2_pmu_init(void)
+{
+ return NULL;
+}
+
+#endif
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec..a4688e8 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -45,16 +45,41 @@
return 0;
}
-static struct platform_driver pmu_driver = {
+static struct platform_driver cpu_pmu_driver = {
.driver = {
- .name = "arm-pmu",
+ .name = "cpu-arm-pmu",
},
.probe = pmu_device_probe,
};
+static struct platform_driver l2_pmu_driver = {
+ .driver = {
+ .name = "l2-arm-pmu",
+ },
+ .probe = pmu_device_probe,
+};
+
+static struct platform_driver *pmu_drivers[] __initdata = {
+ &cpu_pmu_driver,
+ &l2_pmu_driver,
+};
+
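+/*
+ * Register both PMU platform drivers; if one fails, unregister those
+ * already registered and return the error.
+ */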
static int __init register_pmu_driver(void)
{
- return platform_driver_register(&pmu_driver);
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_drivers); i++) {
+ err = platform_driver_register(pmu_drivers[i]);
+ if (err) {
+ pr_err("%s: failed to register id:%d\n", __func__, i);
+ while (--i >= 0)
+ platform_driver_unregister(pmu_drivers[i]);
+ break;
+ }
+ }
+
+ return err;
}
device_initcall(register_pmu_driver);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index acbb447..6b1ee83 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -29,6 +29,9 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
+#ifdef CONFIG_MEMORY_HOTPLUG
+#include <linux/memory_hotplug.h>
+#endif
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -97,6 +100,8 @@
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
@@ -517,6 +522,62 @@
}
early_param("mem", early_mem);
+#ifdef CONFIG_MEMORY_HOTPLUG
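+/*
+ * "mem_reserved=size[@start]" and "mem_low_power=size[@start]" each describe
+ * a physical region; repeated uses are merged into one range spanning both.
+ * Only the low-power region is also added as usable memory here.
+ */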
+static void __init early_mem_reserved(char **p)
+{
+ unsigned int start;
+ unsigned int size;
+ unsigned int end;
+ unsigned int h_end;
+
+ start = PHYS_OFFSET;
+ size = memparse(*p, p);
+ if (**p == '@')
+ start = memparse(*p + 1, p);
+
+ if (movable_reserved_start) {
+ end = start + size;
+ h_end = movable_reserved_start + movable_reserved_size;
+ end = max(end, h_end);
+ movable_reserved_start = min(movable_reserved_start,
+ (unsigned long)start);
+ movable_reserved_size = end - movable_reserved_start;
+ } else {
+ movable_reserved_start = start;
+ movable_reserved_size = size;
+ }
+}
+__early_param("mem_reserved=", early_mem_reserved);
+
+static void __init early_mem_low_power(char **p)
+{
+ unsigned int start;
+ unsigned int size;
+ unsigned int end;
+ unsigned int h_end;
+
+ start = PHYS_OFFSET;
+ size = memparse(*p, p);
+ if (**p == '@')
+ start = memparse(*p + 1, p);
+
+ if (low_power_memory_start) {
+ end = start + size;
+ h_end = low_power_memory_start + low_power_memory_size;
+ end = max(end, h_end);
+ low_power_memory_start = min(low_power_memory_start,
+ (unsigned long)start);
+ low_power_memory_size = end - low_power_memory_start;
+ } else {
+ low_power_memory_start = start;
+ low_power_memory_size = size;
+ }
+
+ arm_add_memory(start, size);
+}
+__early_param("mem_low_power=", early_mem_low_power);
+#endif
+
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
@@ -606,6 +667,66 @@
__tagtable(ATAG_MEM, parse_tag_mem32);
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __init parse_tag_mem32_reserved(const struct tag *tag)
+{
+ unsigned int start;
+ unsigned int size;
+ unsigned int end;
+ unsigned int h_end;
+
+ start = tag->u.mem.start;
+ size = tag->u.mem.size;
+
+ if (movable_reserved_start) {
+ end = start + size;
+ h_end = movable_reserved_start + movable_reserved_size;
+ end = max(end, h_end);
+ movable_reserved_start = min(movable_reserved_start,
+ (unsigned long)start);
+ movable_reserved_size = end - movable_reserved_start;
+ } else {
+ movable_reserved_start = tag->u.mem.start;
+ movable_reserved_size = tag->u.mem.size;
+ }
+ printk(KERN_ALERT "reserved %lx at %lx for hotplug\n",
+ movable_reserved_size, movable_reserved_start);
+
+ return 0;
+}
+
+__tagtable(ATAG_MEM_RESERVED, parse_tag_mem32_reserved);
+
+static int __init parse_tag_mem32_low_power(const struct tag *tag)
+{
+ unsigned int start;
+ unsigned int size;
+ unsigned int end;
+ unsigned int h_end;
+
+ start = tag->u.mem.start;
+ size = tag->u.mem.size;
+
+ if (low_power_memory_start) {
+ end = start + size;
+ h_end = low_power_memory_start + low_power_memory_size;
+ end = max(end, h_end);
+ low_power_memory_start = min(low_power_memory_start,
+ (unsigned long)start);
+ low_power_memory_size = end - low_power_memory_start;
+ } else {
+ low_power_memory_start = tag->u.mem.start;
+ low_power_memory_size = tag->u.mem.size;
+ }
+ printk(KERN_ALERT "low power memory %lx at %lx\n",
+ low_power_memory_size, low_power_memory_start);
+
+ return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
+}
+
+__tagtable(ATAG_MEM_LOW_POWER, parse_tag_mem32_low_power);
+#endif
+
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
.orig_video_lines = 30,
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4..1b2887b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -597,6 +597,11 @@
void smp_send_reschedule(int cpu)
{
+ if (unlikely(cpu_is_offline(cpu))) {
+ pr_warn("%s: attempt to send resched-IPI to an offline cpu.\n",
+ __func__);
+ return;
+ }
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d4..f62743c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -127,8 +127,7 @@
twd_calibrate_rate();
clk->name = "local_timer";
- clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_C3STOP;
+ clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 56b2715..aaca029 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -453,6 +453,10 @@
up_read(&mm->mmap_sem);
flush_cache_user_range(start, end);
+
+#ifdef CONFIG_ARCH_MSM7X27
+ mb();
+#endif
return;
}
up_read(&mm->mmap_sem);