Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/perfmon/Makefile b/arch/arm/perfmon/Makefile
new file mode 100644
index 0000000..716e087
--- /dev/null
+++ b/arch/arm/perfmon/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_KSAPI) += ksapi.o
+
+# Object file lists.
+obj-y += perf-function-hooks.o
+ksapi-y += perf-v7.o per.o per-process-perf.o per-axi.o
+ksapi-$(CONFIG_ARCH_MSM8X60) += perf-smp.o
diff --git a/arch/arm/perfmon/cp15_registers.h b/arch/arm/perfmon/cp15_registers.h
new file mode 100644
index 0000000..3de4d8b
--- /dev/null
+++ b/arch/arm/perfmon/cp15_registers.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+cp15_registers.h
+
+DESCRIPTION: define macros for reading and writing the CP15 registers
+on the ARMv7
+
+REV/DATE: Fri Mar 18 15:54:32 EST 2005
+*/
+
+#ifndef __cp15_registers__
+#define __cp15_registers__
+
+#include "mcrmrc.h"
+
+#define WCP15_SDER(reg) MCR15(reg, 0, c1, c1, 1)
+/*
+* Performance Monitor Registers
+*/
+#define WCP15_PMACTLR(reg) MCR15(reg, 0, c9, c15, 5)
+#define WCP15_PMCCNTCR(reg) MCR15(reg, 0, c9, c15, 2)
+#define WCP15_PMCCNTR(reg) MCR15(reg, 0, c9, c13, 0)
+#define WCP15_PMCCNTSR(reg) MCR15(reg, 0, c9, c13, 3)
+#define WCP15_PMCNTENCLR(reg) MCR15(reg, 0, c9, c12, 2)
+#define WCP15_PMCNTENSET(reg) MCR15(reg, 0, c9, c12, 1)
+#define WCP15_PMCR(reg) MCR15(reg, 0, c9, c12, 0)
+#define WCP15_PMINTENCLR(reg) MCR15(reg, 0, c9, c14, 2)
+#define WCP15_PMINTENSET(reg) MCR15(reg, 0, c9, c14, 1)
+#define WCP15_PMOVSR(reg) MCR15(reg, 0, c9, c12, 3)
+#define WCP15_PMRLDR(reg) MCR15(reg, 0, c9, c15, 4)
+#define WCP15_PMSELR(reg) MCR15(reg, 0, c9, c12, 5)
+#define WCP15_PMSWINC(reg) MCR15(reg, 0, c9, c12, 4)
+#define WCP15_PMUSERENR(reg) MCR15(reg, 0, c9, c14, 0)
+#define WCP15_PMXEVCNTCR(reg) MCR15(reg, 0, c9, c15, 0)
+#define WCP15_PMXEVCNTR(reg) MCR15(reg, 0, c9, c13, 2)
+#define WCP15_PMXEVCNTSR(reg) MCR15(reg, 0, c9, c15, 1)
+#define WCP15_PMXEVTYPER(reg) MCR15(reg, 0, c9, c13, 1)
+#define WCP15_LPM0EVTYPER(reg) MCR15(reg, 0, c15, c0, 0)
+#define WCP15_LPM1EVTYPER(reg) MCR15(reg, 1, c15, c0, 0)
+#define WCP15_LPM2EVTYPER(reg) MCR15(reg, 2, c15, c0, 0)
+#define WCP15_LPM3EVTYPER(reg) MCR15(reg, 3, c15, c0, 0)
+#define WCP15_L2LPMEVTYPER(reg) MCR15(reg, 3, c15, c2, 0)
+#define WCP15_VLPMEVTYPER(reg) MCR15(reg, 7, c11, c0, 0)
+#define WCP15_L2VR3F1(reg) MCR15(reg, 3, c15, c15, 1)
+
+/*
+* READ the registers
+*/
+#define RCP15_SDER(reg) MRC15(reg, 0, c1, c1, 1)
+/*
+* Performance Monitor Registers
+*/
+#define RCP15_PMACTLR(reg) MRC15(reg, 0, c9, c15, 5)
+#define RCP15_PMCCNTCR(reg) MRC15(reg, 0, c9, c15, 2)
+#define RCP15_PMCCNTR(reg) MRC15(reg, 0, c9, c13, 0)
+#define RCP15_PMCCNTSR(reg) MRC15(reg, 0, c9, c13, 3)
+#define RCP15_PMCNTENCLR(reg) MRC15(reg, 0, c9, c12, 2)
+#define RCP15_PMCNTENSET(reg) MRC15(reg, 0, c9, c12, 1)
+#define RCP15_PMCR(reg) MRC15(reg, 0, c9, c12, 0)
+#define RCP15_PMINTENCLR(reg) MRC15(reg, 0, c9, c14, 2)
+#define RCP15_PMINTENSET(reg) MRC15(reg, 0, c9, c14, 1)
+#define RCP15_PMOVSR(reg) MRC15(reg, 0, c9, c12, 3)
+#define RCP15_PMRLDR(reg) MRC15(reg, 0, c9, c15, 4)
+#define RCP15_PMSELR(reg) MRC15(reg, 0, c9, c12, 5)
+#define RCP15_PMSWINC(reg) MRC15(reg, 0, c9, c12, 4)
+#define RCP15_PMUSERENR(reg) MRC15(reg, 0, c9, c14, 0)
+#define RCP15_PMXEVCNTCR(reg) MRC15(reg, 0, c9, c15, 0)
+#define RCP15_PMXEVCNTR(reg) MRC15(reg, 0, c9, c13, 2)
+#define RCP15_PMXEVCNTSR(reg) MRC15(reg, 0, c9, c15, 1)
+#define RCP15_PMXEVTYPER(reg) MRC15(reg, 0, c9, c13, 1)
+#define RCP15_LPM0EVTYPER(reg) MRC15(reg, 0, c15, c0, 0)
+#define RCP15_LPM1EVTYPER(reg) MRC15(reg, 1, c15, c0, 0)
+#define RCP15_LPM2EVTYPER(reg) MRC15(reg, 2, c15, c0, 0)
+#define RCP15_LPM3EVTYPER(reg) MRC15(reg, 3, c15, c0, 0)
+#define RCP15_L2LPMEVTYPER(reg) MRC15(reg, 3, c15, c2, 0)
+#define RCP15_VLPMEVTYPER(reg) MRC15(reg, 7, c11, c0, 0)
+#define RCP15_CONTEXTIDR(reg) MRC15(reg, 0, c13, c0, 1)
+#define RCP15_L2CR0(reg) MRC15(reg, 3, c15, c0, 1)
+#define RCP15_L2VR3F1(reg) MRC15(reg, 3, c15, c15, 1)
+
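+/*
+* Usage sketch (illustrative only, not part of the original patch): each
+* macro expands to a single mrc/mcr instruction, e.g. to read the cycle
+* counter and clear any reported overflows:
+*
+* unsigned long cycles, ovf;
+* RCP15_PMCCNTR(cycles);
+* RCP15_PMOVSR(ovf);
+* WCP15_PMOVSR(ovf); (PMOVSR is write-one-to-clear)
+*/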
+#endif
+
diff --git a/arch/arm/perfmon/l2_cp15_registers.h b/arch/arm/perfmon/l2_cp15_registers.h
new file mode 100644
index 0000000..796dc8b
--- /dev/null
+++ b/arch/arm/perfmon/l2_cp15_registers.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+l2_cp15_registers.h
+
+DESCRIPTION: define macros for reading and writing the L2 CP15 performance
+monitor registers for the ARMv7
+
+REV/DATE: Fri Mar 18 15:54:32 EST 2005
+*/
+
+#ifndef __l2_cp15_registers__
+#define __l2_cp15_registers__
+
+#include "mcrmrc.h"
+
+#define WCP15_SDER(reg) MCR15(reg, 0, c1, c1, 1)
+/*
+* Performance Monitor Registers
+*/
+#define WCP15_L2MPCR(reg) MCR15(reg, 3, c15, c0, 4)
+#define WCP15_L2PMCCNTCR(reg) MCR15(reg, 3, c15, c4, 4)
+#define WCP15_L2PMCCNTR(reg) MCR15(reg, 3, c15, c4, 5)
+#define WCP15_L2PMCCNTSR(reg) MCR15(reg, 3, c15, c4, 6)
+#define WCP15_L2PMCNTENCLR(reg) MCR15(reg, 3, c15, c4, 2)
+#define WCP15_L2PMCNTENSET(reg) MCR15(reg, 3, c15, c4, 3)
+#define WCP15_L2PMCR(reg) MCR15(reg, 3, c15, c4, 0)
+#define WCP15_L2PMINTENCLR(reg) MCR15(reg, 3, c15, c5, 0)
+#define WCP15_L2PMINTENSET(reg) MCR15(reg, 3, c15, c5, 1)
+#define WCP15_L2PMOVSR(reg) MCR15(reg, 3, c15, c4, 1)
+#define WCP15_L2PMRLDR(reg) MCR15(reg, 3, c15, c4, 7)
+#define WCP15_L2PMSELR(reg) MCR15(reg, 3, c15, c6, 0)
+#define WCP15_L2PMXEVCNTCR(reg) MCR15(reg, 3, c15, c6, 4)
+#define WCP15_L2PMXEVCNTR(reg) MCR15(reg, 3, c15, c6, 5)
+#define WCP15_L2PMXEVCNTSR(reg) MCR15(reg, 3, c15, c6, 6)
+#define WCP15_L2PMXEVTYPER(reg) MCR15(reg, 3, c15, c6, 7)
+#define WCP15_L2PMXEVFILTER(reg) MCR15(reg, 3, c15, c6, 3)
+#define WCP15_L2PMEVTYPER0(reg) MCR15(reg, 3, c15, c7, 0)
+#define WCP15_L2PMEVTYPER1(reg) MCR15(reg, 3, c15, c7, 1)
+#define WCP15_L2PMEVTYPER2(reg) MCR15(reg, 3, c15, c7, 2)
+#define WCP15_L2PMEVTYPER3(reg) MCR15(reg, 3, c15, c7, 3)
+#define WCP15_L2PMEVTYPER4(reg) MCR15(reg, 3, c15, c7, 4)
+#define WCP15_L2VR3F1(reg) MCR15(reg, 3, c15, c15, 1)
+
+/*
+* READ the registers
+*/
+#define RCP15_SDER(reg) MRC15(reg, 0, c1, c1, 1)
+/*
+* Performance Monitor Registers
+*/
+#define RCP15_L2MPCR(reg) MRC15(reg, 3, c15, c0, 4)
+#define RCP15_L2PMCCNTCR(reg) MRC15(reg, 3, c15, c4, 4)
+#define RCP15_L2PMCCNTR(reg) MRC15(reg, 3, c15, c4, 5)
+#define RCP15_L2PMCCNTSR(reg) MRC15(reg, 3, c15, c4, 6)
+#define RCP15_L2PMCNTENCLR(reg) MRC15(reg, 3, c15, c4, 2)
+#define RCP15_L2PMCNTENSET(reg) MRC15(reg, 3, c15, c4, 3)
+#define RCP15_L2PMCR(reg) MRC15(reg, 3, c15, c4, 0)
+#define RCP15_L2PMINTENCLR(reg) MRC15(reg, 3, c15, c5, 0)
+#define RCP15_L2PMINTENSET(reg) MRC15(reg, 3, c15, c5, 1)
+#define RCP15_L2PMOVSR(reg) MRC15(reg, 3, c15, c4, 1)
+#define RCP15_L2PMRLDR(reg) MRC15(reg, 3, c15, c4, 7)
+#define RCP15_L2PMSELR(reg) MRC15(reg, 3, c15, c6, 0)
+#define RCP15_L2PMXEVCNTCR(reg) MRC15(reg, 3, c15, c6, 4)
+#define RCP15_L2PMXEVCNTR(reg) MRC15(reg, 3, c15, c6, 5)
+#define RCP15_L2PMXEVCNTSR(reg) MRC15(reg, 3, c15, c6, 6)
+#define RCP15_L2PMXEVTYPER(reg) MRC15(reg, 3, c15, c6, 7)
+#define RCP15_L2PMXEVFILTER(reg) MRC15(reg, 3, c15, c6, 3)
+#define RCP15_L2PMEVTYPER0(reg) MRC15(reg, 3, c15, c7, 0)
+#define RCP15_L2PMEVTYPER1(reg) MRC15(reg, 3, c15, c7, 1)
+#define RCP15_L2PMEVTYPER2(reg) MRC15(reg, 3, c15, c7, 2)
+#define RCP15_L2PMEVTYPER3(reg) MRC15(reg, 3, c15, c7, 3)
+#define RCP15_L2PMEVTYPER4(reg) MRC15(reg, 3, c15, c7, 4)
+#define RCP15_L2VR3F1(reg) MRC15(reg, 3, c15, c15, 1)
+
+#endif
+
diff --git a/arch/arm/perfmon/mcrmrc.h b/arch/arm/perfmon/mcrmrc.h
new file mode 100644
index 0000000..29f9ac0
--- /dev/null
+++ b/arch/arm/perfmon/mcrmrc.h
@@ -0,0 +1,86 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+mcrmrc.h
+
+DESCRIPTION: Convenience macros for accessing the CP15 registers on the ARM.
+
+REV/DATE: Fri Mar 18 16:34:44 EST 2005
+*/
+
+#ifndef __mrcmcr__h_
+#define __mrcmcr__h_
+
+/*
+* Define some convenience macros to access the cp registers from C code.
+* Lots of macro trickery here.
+*
+* Takes the same format as the asm instructions and, unfortunately, you
+* cannot use variables to select the crn, crm or op fields...
+*
+* For those unfamiliar with the # and string stuff:
+* # creates a string from the value, and any two adjacent string literals
+* are concatenated...thus these create one big asm string for the
+* inline asm code.
+*
+* When compiled these reduce to single asm instructions (fast) but
+* without all the hassle of __asm__ __volatile__ (...) =r
+*
+* Format is:
+*
+* unsigned long reg; // destination variable
+* MRC(reg, p15, 0, c1, c0, 0);
+*
+* MRC - read a control register
+* MCR - write a control register
+*/
+
+/*
+* Some assembly macros so we can use the same macros as in the C version.
+* Turns the ASM code a little C-ish but keeps the code consistent and in
+* one location...
+*/
+#ifdef __ASSEMBLY__
+
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+(mrc processor , op1 , reg, crn , crm , op2)
+
+#define MCR(reg, processor, op1, crn, crm, op2) \
+(mcr processor , op1 , reg, crn , crm , op2)
+
+/*
+* C version of the macros.
+*/
+#else
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define MCR(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mcr " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: : "r" (reg))
+#endif
+
+
+/*
+* Easy access convenience function to read CP15 registers from c code
+*/
+#define MRC15(reg, op1, crn, crm, op2) MRC(reg, p15, op1, crn, crm, op2)
+#define MCR15(reg, op1, crn, crm, op2) MCR(reg, p15, op1, crn, crm, op2)
+
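+/*
+* Usage sketch (illustrative only): read PMCR (c9, c12, 0), set the enable
+* bit and write it back through the convenience wrappers.
+*
+* unsigned long val;
+* MRC15(val, 0, c9, c12, 0);
+* val |= 1;
+* MCR15(val, 0, c9, c12, 0);
+*/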
+#endif
diff --git a/arch/arm/perfmon/per-axi.c b/arch/arm/perfmon/per-axi.c
new file mode 100644
index 0000000..48309be
--- /dev/null
+++ b/arch/arm/perfmon/per-axi.c
@@ -0,0 +1,759 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+per-axi
+DESCRIPTION
+Functions related to AXI bus performance counter manipulations.
+*/
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include "per-axi.h"
+#include "perf.h"
+
+/*
+Definitions for AXI register addresses, macros to set and get register values
+*/
+#define AXI_BASE_SIZE 0x00004000
+#define AXI_REG_BASE (AXI_BASE + 0x00000000)
+#define AXI_REG_BASE_PHYS 0xa8200000
+
+#define __inpdw(port) ioread32(port)
+#define in_dword_masked(addr, mask) (__inpdw(addr) & (mask))
+#define __outpdw(port, val) (iowrite32((uint32_t) (val), port))
+#define out_dword(addr, val) __outpdw(addr, val)
+
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR \
+ (AXI_REG_BASE + 0x00003434)
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR, \
+ HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_RMSK)
+
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR (AXI_REG_BASE + 0x00003438)
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR, \
+ HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_RMSK)
+
+#define HWIO_AXI_MONITOR_SELECTION_REG0_ADDR (AXI_REG_BASE + 0x00003428)
+#define HWIO_AXI_MONITOR_SELECTION_REG1_ADDR (AXI_REG_BASE + 0x0000342c)
+#define HWIO_AXI_MONITOR_TENURE_SELECTION_REG_ADDR (AXI_REG_BASE + 0x00003430)
+#define HWIO_AXI_MONITOR_SELECTION_REG0_ETC_BMSK 0x4000
+#define HWIO_AXI_MONITOR_SELECTION_REG0_ECC_BMSK 0x2000
+#define HWIO_AXI_MONITOR_SELECTION_REG0_EEC1_BMSK 0x800
+#define HWIO_AXI_MONITOR_SELECTION_REG0_EEC0_BMSK 0x200
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR, v)
+#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR, v)
+#define HWIO_AXI_MONITOR_SELECTION_REG0_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_SELECTION_REG0_ADDR, v)
+#define HWIO_AXI_MONITOR_SELECTION_REG1_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_SELECTION_REG1_ADDR, v)
+#define HWIO_AXI_MONITOR_TENURE_SELECTION_REG_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_TENURE_SELECTION_REG_ADDR, v)
+#define HWIO_AXI_MONITOR_SELECTION_REG0_RMSK 0xffff
+#define HWIO_AXI_MONITOR_SELECTION_REG0_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_SELECTION_REG0_ADDR, \
+ HWIO_AXI_MONITOR_SELECTION_REG0_RMSK)
+
+#define HWIO_AXI_CONFIGURATION_REG_ADDR (AXI_REG_BASE + 0x00000008)
+#define HWIO_AXI_CONFIGURATION_REG_OUT(v) \
+ out_dword(HWIO_AXI_CONFIGURATION_REG_ADDR, v)
+#define HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK 0x0
+#define HWIO_AXI_CONFIGURATION_REG_DISABLE 0x2
+#define AXI_EVTSEL_ENABLE_MASK 0x6a00
+#define AXI_EVTSEL_DISABLE_MASK 0x95ff
+#define AXI_EVTSEL_RESET_MASK 0xfe40
+
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_ADDR (AXI_REG_BASE + 0x00003450)
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_RMSK 0xffff
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_SHFT 0
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_EVENT_LOWER_REG0_ADDR, \
+ HWIO_AXI_MONITOR_EVENT_LOWER_REG0_RMSK)
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_ADDR (AXI_REG_BASE + 0x00003454)
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_RMSK 0xffff
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_SHFT 0
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_EVENT_UPPER_REG0_ADDR, \
+ HWIO_AXI_MONITOR_EVENT_UPPER_REG0_RMSK)
+
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_ADDR (AXI_REG_BASE + 0x00003458)
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_RMSK 0xffff
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_SHFT 0
+#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_EVENT_LOWER_REG1_ADDR, \
+ HWIO_AXI_MONITOR_EVENT_LOWER_REG1_RMSK)
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_ADDR (AXI_REG_BASE + 0x0000345c)
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_RMSK 0xffff
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_SHFT 0
+#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_EVENT_UPPER_REG1_ADDR, \
+ HWIO_AXI_MONITOR_EVENT_UPPER_REG1_RMSK)
+
+#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR (AXI_REG_BASE + 0x00003448)
+#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_SHFT 0
+#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR, \
+ HWIO_AXI_MONITOR_TENURE_LOWER_REG_RMSK)
+#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR (AXI_REG_BASE + 0x00003444)
+#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_SHFT 0
+#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR, \
+ HWIO_AXI_MONITOR_TENURE_UPPER_REG_RMSK)
+
+#define HWIO_AXI_MONITOR_MIN_REG_ADDR (AXI_REG_BASE + 0x0000343c)
+#define HWIO_AXI_MONITOR_MIN_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_MIN_REG_SHFT 0
+#define HWIO_AXI_MONITOR_MIN_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_MIN_REG_ADDR, \
+ HWIO_AXI_MONITOR_MIN_REG_RMSK)
+#define HWIO_AXI_MONITOR_MAX_REG_ADDR (AXI_REG_BASE + 0x00003440)
+#define HWIO_AXI_MONITOR_MAX_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_MAX_REG_SHFT 0
+#define HWIO_AXI_MONITOR_MAX_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_MAX_REG_ADDR, \
+ HWIO_AXI_MONITOR_MAX_REG_RMSK)
+#define HWIO_AXI_MONITOR_LAST_TENURE_REG_ADDR (AXI_REG_BASE + 0x0000344c)
+#define HWIO_AXI_MONITOR_LAST_TENURE_REG_RMSK 0xffff
+#define HWIO_AXI_MONITOR_LAST_TENURE_REG_SHFT 0
+#define HWIO_AXI_MONITOR_LAST_TENURE_REG_IN \
+ in_dword_masked(HWIO_AXI_MONITOR_LAST_TENURE_REG_ADDR, \
+ HWIO_AXI_MONITOR_LAST_TENURE_REG_RMSK)
+#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR, v)
+#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_OUT(v) \
+ out_dword(HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR, v)
+
+#define HWIO_AXI_RESET_ALL 0x9400
+#define HWIO_AXI_ENABLE_ALL_NOCYCLES 0x4a00
+#define HWIO_AXI_DISABLE_ALL 0xb500
+uint32_t AXI_BASE;
+
+unsigned int is_first = 1;
+struct perf_mon_axi_data pm_axi_info;
+struct perf_mon_axi_cnts axi_cnts;
+
+/*
+FUNCTION get_axi_sel_reg0
+
+DESCRIPTION
+ Retrieve the value of AXI_SEL_REG0
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI_SEL_REG0
+SIDE EFFECTS
+*/
+unsigned long get_axi_sel_reg0(void)
+{
+ return pm_axi_info.sel_reg0;
+}
+
+/*
+FUNCTION get_axi_sel_reg1
+
+DESCRIPTION
+ Retrieve the value of AXI_SEL_REG1
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI_SEL_REG1
+SIDE EFFECTS
+*/
+unsigned long get_axi_sel_reg1(void)
+{
+ return pm_axi_info.sel_reg1;
+}
+
+/*
+FUNCTION get_axi_ten_sel_reg
+
+DESCRIPTION
+ Retrieve the value of AXI_TEN_REG
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI_TEN_REG
+SIDE EFFECTS
+*/
+unsigned long get_axi_ten_sel_reg(void)
+{
+ return pm_axi_info.ten_sel_reg;
+}
+
+/*
+FUNCTION get_axi_valid
+
+DESCRIPTION
+ Retrieve the value of AXI valid bit
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI Valid bit
+SIDE EFFECTS
+*/
+unsigned long get_axi_valid(void)
+{
+ return pm_axi_info.valid;
+}
+
+/*
+FUNCTION get_axi_enable
+
+DESCRIPTION
+ Retrieve the value of AXI enable bit
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI enable bit
+SIDE EFFECTS
+*/
+unsigned long get_axi_enable(void)
+{
+ return pm_axi_info.enable;
+}
+
+/*
+FUNCTION get_axi_clear
+
+DESCRIPTION
+ Retrieve the value of AXI clear bit
+
+DEPENDENCIES
+
+RETURN VALUE
+ AXI clear bit
+SIDE EFFECTS
+*/
+unsigned long get_axi_clear(void)
+{
+ return pm_axi_info.clear;
+}
+
+/*
+FUNCTION pm_axi_cnts_write
+
+DESCRIPTION
+ Write handler for the /proc axi results directory.
+
+DEPENDENCIES
+
+RETURN VALUE
+ Number of characters consumed.
+
+SIDE EFFECTS
+*/
+int pm_axi_cnts_write(struct file *file, const char *buff,
+ unsigned long cnt, void *data)
+{
+ char *newbuf;
+ struct perf_mon_axi_cnts *p =
+ (struct perf_mon_axi_cnts *)data;
+
+ if (p == 0)
+ return cnt;
+ /*
+ * Allocate the user data in kernel space and then copy from user to kernel.
+ */
+ newbuf = kmalloc(cnt + 1, GFP_KERNEL);
+ if (0 == newbuf)
+ return cnt;
+ if (copy_from_user(newbuf, buff, cnt) != 0)
+ printk(KERN_INFO "%s copy_from_user failed\n", __func__);
+ kfree(newbuf);
+ return cnt;
+}
+
+/*
+FUNCTION pm_axi_update_cnts
+
+DESCRIPTION
+ Read the current AXI counter values. Check for overflows and
+ adjust the values stored accordingly.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_update_cnts(void)
+{
+ if (is_first) {
+ pm_axi_start();
+ } else {
+ if (pm_axi_info.valid == 1) {
+ pm_axi_info.valid = 0;
+ pm_axi_update();
+ } else {
+ pm_axi_enable();
+ }
+ }
+ is_first = 0;
+ axi_cnts.cycles += pm_get_axi_cycle_count();
+ axi_cnts.cnt0 += pm_get_axi_evt0_count();
+ axi_cnts.cnt1 += pm_get_axi_evt1_count();
+ axi_cnts.tenure_total += pm_get_axi_ten_total_count();
+
+ axi_cnts.tenure_min = pm_get_axi_ten_min_count();
+ axi_cnts.tenure_max = pm_get_axi_ten_max_count();
+ axi_cnts.tenure_last = pm_get_axi_ten_last_count();
+
+ pm_axi_start();
+}
+
+/*
+FUNCTION pm_axi_clear_cnts
+
+DESCRIPTION
+ Clear the locally stored AXI counter values.
+ Also clear the AXI counter registers.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_clear_cnts(void)
+{
+ axi_cnts.cycles = 0;
+ axi_cnts.cnt0 = 0;
+ axi_cnts.cnt1 = 0;
+ axi_cnts.tenure_total = 0;
+ axi_cnts.tenure_min = 0;
+ axi_cnts.tenure_max = 0;
+ axi_cnts.tenure_last = 0;
+ pm_axi_start();
+}
+
+/*
+FUNCTION pm_axi_read_decimal
+
+DESCRIPTION
+ Read handler for the /proc axi results directory in decimal format.
+
+DEPENDENCIES
+
+RETURN VALUE
+ Number of characters to output.
+
+SIDE EFFECTS
+*/
+int pm_axi_read_decimal(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct perf_mon_axi_cnts *p = (struct perf_mon_axi_cnts *)data;
+
+ return sprintf(page, "cnt0:%llu cnt1:%llu tenure:%llu ten_max:%llu "
+ "ten_min:%llu ten_last:%llu cycles:%llu\n",
+ p->cnt0,
+ p->cnt1,
+ p->tenure_total,
+ p->tenure_max,
+ p->tenure_min,
+ p->tenure_last,
+ p->cycles);
+}
+
+/*
+FUNCTION pm_axi_read_hex
+
+DESCRIPTION
+ Read handler for the /proc axi results directory in hex format.
+
+DEPENDENCIES
+
+RETURN VALUE
+ Number of characters to output.
+
+SIDE EFFECTS
+*/
+int pm_axi_read_hex(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct perf_mon_axi_cnts *p = (struct perf_mon_axi_cnts *)data;
+
+ return sprintf(page, "cnt0:%llx cnt1:%llx tenure:%llx ten_max:%llx "
+ "ten_min:%llx ten_last:%llx cycles:%llx\n",
+ p->cnt0,
+ p->cnt1,
+ p->tenure_total,
+ p->tenure_max,
+ p->tenure_min,
+ p->tenure_last,
+ p->cycles);
+
+}
+
+/*
+FUNCTION pm_axi_set_proc_entry
+
+DESCRIPTION
+ Create a generic entry for the /proc axi settings directory.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_set_proc_entry(char *name, unsigned long *var,
+ struct proc_dir_entry *d, int hex)
+{
+ struct proc_dir_entry *pe;
+ pe = create_proc_entry(name, 0777, d);
+ if (0 == pe)
+ return;
+ if (hex) {
+ pe->read_proc = per_process_read;
+ pe->write_proc = per_process_write_hex;
+ } else {
+ pe->read_proc = per_process_read_decimal;
+ pe->write_proc = per_process_write_dec;
+ }
+ pe->data = (void *)var;
+}
+
+/*
+FUNCTION pm_axi_get_cnt_proc_entry
+
+DESCRIPTION
+ Create a generic entry for the /proc axi results directory.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_get_cnt_proc_entry(char *name, struct perf_mon_axi_cnts *var,
+ struct proc_dir_entry *d, int hex)
+{
+ struct proc_dir_entry *pe;
+ pe = create_proc_entry(name, 0777, d);
+ if (0 == pe)
+ return;
+ if (hex) {
+ pe->read_proc = pm_axi_read_hex;
+ pe->write_proc = pm_axi_cnts_write;
+ } else {
+ pe->read_proc = pm_axi_read_decimal;
+ pe->write_proc = pm_axi_cnts_write;
+ }
+ pe->data = (void *)var;
+}
+
+/*
+FUNCTION pm_axi_clear_tenure
+
+DESCRIPTION
+ Clear the AXI tenure counter manually. Temporary workaround until the
+ hardware bug is fixed.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_clear_tenure(void)
+{
+ HWIO_AXI_MONITOR_TENURE_UPPER_REG_OUT(0x0);
+ HWIO_AXI_MONITOR_TENURE_LOWER_REG_OUT(0x0);
+}
+
+/*
+FUNCTION pm_axi_init
+
+DESCRIPTION
+ Map AXI region to virtual memory.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void pm_axi_init(void)
+{
+ #ifdef CONFIG_ARCH_QSD8X50
+ {
+ /*Map the AXI regs*/
+ AXI_BASE = (uint32_t)ioremap(AXI_REG_BASE_PHYS, AXI_BASE_SIZE);
+ if (!AXI_BASE)
+ printk(KERN_ERR "Mem map failed\n");
+ }
+ #else
+ {
+ AXI_BASE = (uint32_t)kmalloc(AXI_BASE_SIZE, GFP_KERNEL);
+ }
+ #endif
+
+}
+
+/*
+FUNCTION pm_axi_start
+
+DESCRIPTION
+ Set event0, event1 and tenure registers based on the /proc entries.
+ Set the cycle counter to 0xfffffffe to start the counters.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void
+pm_axi_start(void)
+{
+ unsigned long sel_reg0, sel_reg1, ten_sel_reg;
+ sel_reg0 = get_axi_sel_reg0();
+ sel_reg1 = get_axi_sel_reg1();
+ ten_sel_reg = get_axi_ten_sel_reg();
+ HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK);
+ /*Set AXI Cycle Counter to enable AXI Monitors*/
+ HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(0xffff);
+ HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(0xfffe);
+ /*Set master/slave*/
+ HWIO_AXI_MONITOR_SELECTION_REG1_OUT(sel_reg1);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_RESET_ALL);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_ENABLE_ALL_NOCYCLES);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN
+ | sel_reg0);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN
+ | HWIO_AXI_MONITOR_SELECTION_REG0_ECC_BMSK);
+ HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK);
+}
+
+/*
+FUNCTION pm_axi_update
+
+DESCRIPTION
+ Set event0, event1 and tenure registers based on the /proc entries.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void
+pm_axi_update(void)
+{
+ HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN
+ | HWIO_AXI_RESET_ALL);
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN
+ & HWIO_AXI_DISABLE_ALL);
+ pm_axi_start();
+}
+
+/*
+FUNCTION pm_axi_disable
+
+DESCRIPTION
+ Disable all cntrs.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void
+pm_axi_disable(void)
+{
+ unsigned long sel_reg0;
+ /*Disable cntrs*/
+ sel_reg0 = get_axi_sel_reg0();
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(sel_reg0 & AXI_EVTSEL_DISABLE_MASK);
+ /*Disable clk*/
+ HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_DISABLE);
+}
+
+/*
+FUNCTION pm_axi_enable
+
+DESCRIPTION
+ Enable all cntrs.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void
+pm_axi_enable(void)
+{
+ unsigned long sel_reg0;
+ /*Enable cntrs*/
+ sel_reg0 = get_axi_sel_reg0();
+ HWIO_AXI_MONITOR_SELECTION_REG0_OUT(sel_reg0 | 0x6a00);
+ /*Enable clk*/
+ HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK);
+}
+
+/*
+FUNCTION pm_get_axi_cycle_count
+
+DESCRIPTION
+ Read cycle cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_cycle_count(void)
+{
+ if (HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN == 0x0 &&
+ HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN == 0x0) {
+ /*Set AXI Cycle Counter to enable AXI Monitors*/
+ HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(0xffff);
+ HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(0xfffe);
+ }
+ return 0xfffffffe - ((HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN << 16)
+ + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN);
+}
+
+/*
+FUNCTION pm_get_axi_evt0_count
+
+DESCRIPTION
+ Read Event0 cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_evt0_count(void)
+{
+ return (HWIO_AXI_MONITOR_EVENT_UPPER_REG0_IN << 16)
+ + HWIO_AXI_MONITOR_EVENT_LOWER_REG0_IN;
+}
+
+/*
+FUNCTION pm_get_axi_evt1_count
+
+DESCRIPTION
+ Read Event1 cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_evt1_count(void)
+{
+ return (HWIO_AXI_MONITOR_EVENT_UPPER_REG1_IN << 16)
+ + HWIO_AXI_MONITOR_EVENT_LOWER_REG1_IN;
+}
+
+/*
+FUNCTION pm_get_axi_ten_min_count
+
+DESCRIPTION
+ Read min tenure cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_ten_min_count(void)
+{
+ return HWIO_AXI_MONITOR_MIN_REG_IN;
+}
+
+/*
+FUNCTION pm_get_axi_ten_max_count
+
+DESCRIPTION
+ Read max tenure cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_ten_max_count(void)
+{
+ return HWIO_AXI_MONITOR_MAX_REG_IN;
+}
+
+/*
+FUNCTION pm_get_axi_ten_total_count
+
+DESCRIPTION
+ Read total tenure cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_ten_total_count(void)
+{
+ return (HWIO_AXI_MONITOR_TENURE_UPPER_REG_IN << 16)
+ + HWIO_AXI_MONITOR_TENURE_LOWER_REG_IN;
+}
+
+/*
+FUNCTION pm_get_axi_ten_last_count
+
+DESCRIPTION
+ Read last tenure cntr value
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+unsigned long
+pm_get_axi_ten_last_count(void)
+{
+ return HWIO_AXI_MONITOR_LAST_TENURE_REG_IN;
+}
diff --git a/arch/arm/perfmon/per-axi.h b/arch/arm/perfmon/per-axi.h
new file mode 100644
index 0000000..89f67fc
--- /dev/null
+++ b/arch/arm/perfmon/per-axi.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+*per-axi
+*DESCRIPTION
+*Header File for Functions related to AXI bus performance counter manipulations.
+*/
+
+#ifndef __PER_AXI_H__
+#define __PER_AXI_H__
+unsigned long pm_get_axi_cycle_count(void);
+unsigned long pm_get_axi_evt0_count(void);
+unsigned long pm_get_axi_evt1_count(void);
+unsigned long pm_get_axi_evt2_count(void);
+unsigned long pm_get_axi_ten_min_count(void);
+unsigned long pm_get_axi_ten_max_count(void);
+unsigned long pm_get_axi_ten_total_count(void);
+unsigned long pm_get_axi_ten_last_count(void);
+
+unsigned long get_axi_sel_reg0(void);
+unsigned long get_axi_sel_reg1(void);
+unsigned long get_axi_ten_sel_reg(void);
+unsigned long get_axi_valid(void);
+unsigned long get_axi_enable(void);
+unsigned long get_axi_clear(void);
+
+void pm_axi_clear_cnts(void);
+void pm_axi_update_cnts(void);
+
+void pm_axi_init(void);
+void pm_axi_start(void);
+void pm_axi_update(void);
+void pm_axi_disable(void);
+void pm_axi_enable(void);
+
+struct perf_mon_axi_cnts{
+ unsigned long long cycles;
+ unsigned long long cnt0;
+ unsigned long long cnt1;
+ unsigned long long tenure_total;
+ unsigned long long tenure_min;
+ unsigned long long tenure_max;
+ unsigned long long tenure_last;
+};
+
+struct perf_mon_axi_data{
+ struct proc_dir_entry *proc;
+ unsigned long enable;
+ unsigned long clear;
+ unsigned long valid;
+ unsigned long sel_reg0;
+ unsigned long sel_reg1;
+ unsigned long ten_sel_reg;
+ unsigned long refresh;
+};
+
+extern struct perf_mon_axi_data pm_axi_info;
+extern struct perf_mon_axi_cnts axi_cnts;
+
+void pm_axi_set_proc_entry(char *name, unsigned long *var,
+ struct proc_dir_entry *d, int hex);
+void pm_axi_get_cnt_proc_entry(char *name, struct perf_mon_axi_cnts *var,
+ struct proc_dir_entry *d, int hex);
+
+#endif
diff --git a/arch/arm/perfmon/per-process-perf.c b/arch/arm/perfmon/per-process-perf.c
new file mode 100644
index 0000000..c8bebd8
--- /dev/null
+++ b/arch/arm/perfmon/per-process-perf.c
@@ -0,0 +1,1251 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+/*
+per-process-perf
+DESCRIPTION
+Capture the processor performance registers when the process context
+switches. The /proc file system is used to control and access the results
+of the performance counters.
+
+Each time a process is context switched, the performance counters for
+the Snoop Control Unit and the standard ARM counters are set according
+to the values stored for that process.
+
+The events to capture per process are set in the /proc/ppPerf/settings
+directory.
+
+EXTERNALIZED FUNCTIONS
+
+INITIALIZATION AND SEQUENCING REQUIREMENTS
+per_process_perf_init() sets up the counters and creates the /proc/ppPerf
+tree; counting is then driven by the thread-switch notifier on each
+context switch.
+*/
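+
+/*
+Example use of the /proc interface created below (illustrative only; event
+numbers are platform specific and the paths follow the entries created in
+per_process_perf_init):
+
+ echo 1 > /proc/ppPerf/settings/enable
+ echo 0x3 > /proc/ppPerf/settings/event0
+ echo 1 > /proc/ppPerf/settings/valid
+ echo 1234 > /proc/ppPerf/settings/setPID
+ cat /proc/ppPerf/results/1234
+*/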
+
+/*
+INCLUDE FILES FOR MODULE
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel_stat.h>
+#include <asm/thread_notify.h>
+#include <asm/uaccess.h>
+#include "cp15_registers.h"
+#include "l2_cp15_registers.h"
+#include <asm/perftypes.h>
+#include "per-axi.h"
+#include "perf.h"
+
+#define DEBUG_SWAPIO
+#ifdef DEBUG_SWAPIO
+#define MR_SIZE 1024
+#define PM_PP_ERR -1
+struct mark_data_s {
+ long c;
+ long cpu;
+ unsigned long pid_old;
+ unsigned long pid_new;
+};
+
+struct mark_data_s markRay[MR_SIZE] __attribute__((aligned(16)));
+int mrcnt;
+
+DEFINE_SPINLOCK(_mark_lock);
+
+static inline void MARKPIDS(char a, int opid, int npid)
+{
+ int cpu = smp_processor_id();
+
+ if (opid == 0)
+ return;
+ spin_lock(&_mark_lock);
+ if (++mrcnt >= MR_SIZE)
+ mrcnt = 0;
+ spin_unlock(&_mark_lock);
+
+ markRay[mrcnt].pid_old = opid;
+ markRay[mrcnt].pid_new = npid;
+ markRay[mrcnt].cpu = cpu;
+ markRay[mrcnt].c = a;
+}
+static inline void MARK(char a) { MARKPIDS(a, 0xFFFF, 0xFFFF); }
+static inline void MARKPID(char a, int pid) { MARKPIDS(a, pid, 0xFFFF); }
+
+#else
+#define MARK(a)
+#define MARKPID(a, b)
+#define MARKPIDS(a, b, c)
+
+#endif /* DEBUG_SWAPIO */
+
+/*
+DEFINITIONS AND DECLARATIONS FOR MODULE
+
+This section contains definitions for constants, macros, types, variables
+and other items needed by this module.
+*/
+
+/*
+Constant / Define Declarations
+*/
+
+#define PERF_MON_PROCESS_NUM 0x400
+#define PERF_MON_PROCESS_MASK (PERF_MON_PROCESS_NUM-1)
+#define PP_MAX_PROC_ENTRIES 32
+
+/*
+ * The entry is locked and is not to be replaced.
+ */
+#define PERF_ENTRY_LOCKED (1<<0)
+#define PERF_NOT_FIRST_TIME (1<<1)
+#define PERF_EXITED (1<<2)
+#define PERF_AUTOLOCK (1<<3)
+
+#define IS_LOCKED(p) (p->flags & PERF_ENTRY_LOCKED)
+
+#define PERF_NUM_MONITORS 4
+
+#define L1_EVENTS_0 0
+#define L1_EVENTS_1 1
+#define L2_EVENTS_0 2
+#define L2_EVENTS_1 3
+
+#define PM_CYCLE_OVERFLOW_MASK 0x80000000
+#define L2_PM_CYCLE_OVERFLOW_MASK 0x80000000
+
+#define PM_START_ALL() do {\
+ if (pm_global) \
+ pm_start_all();\
+ } while (0)
+#define PM_STOP_ALL() do {\
+ if (pm_global)\
+ pm_stop_all();\
+ } while (0)
+#define PM_RESET_ALL() do {\
+ if (pm_global)\
+ pm_reset_all();\
+ } while (0)
+
+/*
+ * Accessors for SMP based variables.
+ */
+#define _SWAPS(p) ((p)->cnts[smp_processor_id()].swaps)
+#define _CYCLES(p) ((p)->cnts[smp_processor_id()].cycles)
+#define _COUNTS(p, i) ((p)->cnts[smp_processor_id()].counts[i])
+#define _L2COUNTS(p, i) ((p)->cnts[smp_processor_id()].l2_counts[i])
+#define _L2CYCLES(p) ((p)->cnts[smp_processor_id()].l2_cycles)
+
+/*
+ Type Declarations
+*/
+
+/*
+ * Counts are on a per core basis.
+ */
+struct pm_counters_s {
+ unsigned long long cycles;
+ unsigned long long l2_cycles;
+ unsigned long long counts[PERF_NUM_MONITORS];
+ unsigned long long l2_counts[PERF_NUM_MONITORS];
+ unsigned long swaps;
+};
+
+struct per_process_perf_mon_type{
+ struct pm_counters_s cnts[NR_CPUS];
+ unsigned long control;
+ unsigned long index[PERF_NUM_MONITORS];
+ unsigned long l2_index[PERF_NUM_MONITORS];
+ unsigned long pid;
+ struct proc_dir_entry *proc;
+ struct proc_dir_entry *l2_proc;
+ unsigned short flags;
+ unsigned short running_cpu;
+ char *pidName;
+ unsigned long lpm0evtyper;
+ unsigned long lpm1evtyper;
+ unsigned long lpm2evtyper;
+ unsigned long l2lpmevtyper;
+ unsigned long vlpmevtyper;
+ unsigned long l2pmevtyper0;
+ unsigned long l2pmevtyper1;
+ unsigned long l2pmevtyper2;
+ unsigned long l2pmevtyper3;
+ unsigned long l2pmevtyper4;
+};
+
+unsigned long last_in_pid[NR_CPUS];
+unsigned long fake_swap_out[NR_CPUS] = {0};
+
+/*
+ Local Object Definitions
+*/
+struct per_process_perf_mon_type perf_mons[PERF_MON_PROCESS_NUM];
+struct proc_dir_entry *proc_dir;
+struct proc_dir_entry *settings_dir;
+struct proc_dir_entry *values_dir;
+struct proc_dir_entry *axi_dir;
+struct proc_dir_entry *l2_dir;
+struct proc_dir_entry *axi_settings_dir;
+struct proc_dir_entry *axi_results_dir;
+struct proc_dir_entry *l2_results_dir;
+
+unsigned long pp_enabled;
+unsigned long pp_settings_valid = -1;
+unsigned long pp_auto_lock;
+unsigned long pp_set_pid;
+signed long pp_clear_pid = -1;
+unsigned long per_proc_event[PERF_NUM_MONITORS];
+unsigned long l2_per_proc_event[PERF_NUM_MONITORS];
+unsigned long dbg_flags;
+unsigned long pp_lpm0evtyper;
+unsigned long pp_lpm1evtyper;
+unsigned long pp_lpm2evtyper;
+unsigned long pp_l2lpmevtyper;
+unsigned long pp_vlpmevtyper;
+unsigned long pm_stop_for_interrupts;
+unsigned long pm_global; /* track all, not process based */
+unsigned long pm_global_enable;
+unsigned long pm_remove_pid;
+
+unsigned long pp_l2pmevtyper0;
+unsigned long pp_l2pmevtyper1;
+unsigned long pp_l2pmevtyper2;
+unsigned long pp_l2pmevtyper3;
+unsigned long pp_l2pmevtyper4;
+
+unsigned long pp_proc_entry_index;
+char *per_process_proc_names[PP_MAX_PROC_ENTRIES];
+
+unsigned int axi_swaps;
+#define MAX_AXI_SWAPS 10
+int first_switch = 1;
+/*
+ Forward Declarations
+*/
+
+/*
+Function Definitions
+*/
+
+/*
+FUNCTION per_process_find
+
+DESCRIPTION
+ Find the per process information based on the process id (pid) passed.
+ This is a simple mask based on the number of entries stored in the
+ static array
+
+DEPENDENCIES
+
+RETURN VALUE
+ Pointer to the per process data
+SIDE EFFECTS
+
+*/
+struct per_process_perf_mon_type *per_process_find(unsigned long pid)
+{
+ return &perf_mons[pid & PERF_MON_PROCESS_MASK];
+}
+
+/*
+FUNCTION per_process_get_name
+
+DESCRIPTION
+ Retrieve the name of the performance counter based on the table and
+ index passed. We have two different sets of performance counters, so
+ different tables need to be used.
+
+DEPENDENCIES
+
+RETURN VALUE
+ Pointer to char string with the name of the event or "BAD"
+ Never returns NULL or a bad pointer.
+
+SIDE EFFECTS
+*/
+char *per_process_get_name(unsigned long index)
+{
+ return pm_find_event_name(index);
+}
+
+/*
+FUNCTION per_process_results_read
+
+DESCRIPTION
+ Print out the formatted results from the process id read. Event names
+ and counts are printed.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+int per_process_results_read(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct per_process_perf_mon_type *p =
+ (struct per_process_perf_mon_type *)data;
+ struct pm_counters_s cnts;
+ int i, j;
+
+ /*
+ * Total across all CPUS
+ */
+ memset(&cnts, 0, sizeof(cnts));
+ for (i = 0; i < num_possible_cpus(); i++) {
+ cnts.swaps += p->cnts[i].swaps;
+ cnts.cycles += p->cnts[i].cycles;
+ for (j = 0; j < PERF_NUM_MONITORS; j++)
+ cnts.counts[j] += p->cnts[i].counts[j];
+ }
+
+ /*
+ * Display the totals calculated above as a single result.
+ * Do we want to display, or have an option to display, individual cores?
+ */
+ return sprintf(page, "pid:%lu one:%s:%llu two:%s:%llu three:%s:%llu "
+ "four:%s:%llu cycles:%llu swaps:%lu\n",
+ p->pid,
+ per_process_get_name(p->index[0]), cnts.counts[0],
+ per_process_get_name(p->index[1]), cnts.counts[1],
+ per_process_get_name(p->index[2]), cnts.counts[2],
+ per_process_get_name(p->index[3]), cnts.counts[3],
+ cnts.cycles, cnts.swaps);
+}
+
+int per_process_l2_results_read(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct per_process_perf_mon_type *p =
+ (struct per_process_perf_mon_type *)data;
+ struct pm_counters_s cnts;
+ int i, j;
+
+ /*
+ * Total across all CPUS
+ */
+ memset(&cnts, 0, sizeof(cnts));
+ for (i = 0; i < num_possible_cpus(); i++) {
+ cnts.l2_cycles += p->cnts[i].l2_cycles;
+ for (j = 0; j < PERF_NUM_MONITORS; j++)
+ cnts.l2_counts[j] += p->cnts[i].l2_counts[j];
+ }
+
+ /*
+ * Display the totals calculated above as a single result.
+ * Do we want to display, or have an option to display, individual cores?
+ */
+ return sprintf(page, "pid:%lu l2_one:%s:%llu l2_two:%s:%llu "
+ "l2_three:%s:%llu "
+ "l2_four:%s:%llu l2_cycles:%llu\n",
+ p->pid,
+ per_process_get_name(p->l2_index[0]), cnts.l2_counts[0],
+ per_process_get_name(p->l2_index[1]), cnts.l2_counts[1],
+ per_process_get_name(p->l2_index[2]), cnts.l2_counts[2],
+ per_process_get_name(p->l2_index[3]), cnts.l2_counts[3],
+ cnts.l2_cycles);
+}
+
+/*
+FUNCTION per_process_results_write
+
+DESCRIPTION
+ Allow some control over the results. If the user forgets to autolock or
+ wants to unlock the results so they will be deleted, then this is
+ where it is processed.
+
+ For example, to unlock process 23
+ echo "unlock" > 23
+
+DEPENDENCIES
+
+RETURN VALUE
+ Number of characters used (all of them!)
+
+SIDE EFFECTS
+*/
+int per_process_results_write(struct file *file, const char *buff,
+ unsigned long cnt, void *data)
+{
+ char *newbuf;
+ struct per_process_perf_mon_type *p =
+ (struct per_process_perf_mon_type *)data;
+
+ if (p == 0)
+ return cnt;
+ /*
+ * Allocate the user data in kernel space and then copy from user to kernel.
+ */
+ newbuf = kmalloc(cnt + 1, GFP_KERNEL);
+ if (0 == newbuf)
+ return cnt;
+ if (copy_from_user(newbuf, buff, cnt) != 0) {
+ printk(KERN_INFO "%s copy_from_user failed\n", __func__);
+ kfree(newbuf);
+ return cnt;
+ }
+ newbuf[cnt] = 0;
+
+ if (0 == strcmp("lock", newbuf))
+ p->flags |= PERF_ENTRY_LOCKED;
+ else if (0 == strcmp("unlock", newbuf))
+ p->flags &= ~PERF_ENTRY_LOCKED;
+ else if (0 == strcmp("auto", newbuf))
+ p->flags |= PERF_AUTOLOCK;
+ else if (0 == strcmp("autoun", newbuf))
+ p->flags &= ~PERF_AUTOLOCK;
+
+ kfree(newbuf);
+ return cnt;
+}
+
+/*
+FUNCTION per_process_create_results_proc
+
+DESCRIPTION
+ Create the results /proc file if the system parameters allow it...
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void per_process_create_results_proc(struct per_process_perf_mon_type *p)
+{
+
+ if (0 == p->pidName)
+ p->pidName = kmalloc(12, GFP_KERNEL);
+ if (0 == p->pidName)
+ return;
+ sprintf(p->pidName, "%ld", p->pid);
+
+ if (0 == p->proc) {
+ p->proc = create_proc_entry(p->pidName, 0777, values_dir);
+ if (0 == p->proc)
+ return;
+ } else {
+ p->proc->name = p->pidName;
+ }
+
+ p->proc->read_proc = per_process_results_read;
+ p->proc->write_proc = per_process_results_write;
+ p->proc->data = (void *)p;
+}
+
+void per_process_create_l2_results_proc(struct per_process_perf_mon_type *p)
+{
+
+ if (0 == p->pidName)
+ p->pidName = kmalloc(12, GFP_KERNEL);
+ if (0 == p->pidName)
+ return;
+ sprintf(p->pidName, "%ld", p->pid);
+
+ if (0 == p->l2_proc) {
+ p->l2_proc = create_proc_entry(p->pidName, 0777,
+ l2_results_dir);
+ if (0 == p->l2_proc)
+ return;
+ } else {
+ p->l2_proc->name = p->pidName;
+ }
+
+ p->l2_proc->read_proc = per_process_l2_results_read;
+ p->l2_proc->write_proc = per_process_results_write;
+ p->l2_proc->data = (void *)p;
+}
+/*
+FUNCTION per_process_swap_out
+
+DESCRIPTION
+ Store the counters from the process that is about to swap out. We take
+ the old counts and add them to the current counts in the perf registers.
+ Before the new process is swapped in, the counters are reset.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+typedef void (*vfun)(void *);
+void per_process_swap_out(struct per_process_perf_mon_type *data)
+{
+ int i;
+ unsigned long overflow;
+#ifdef CONFIG_ARCH_MSM8X60
+ unsigned long l2_overflow;
+#endif
+ struct per_process_perf_mon_type *p = data;
+
+ MARKPIDS('O', p->pid, 0);
+ RCP15_PMOVSR(overflow);
+#ifdef CONFIG_ARCH_MSM8X60
+ RCP15_L2PMOVSR(l2_overflow);
+#endif
+
+ if (!pp_enabled)
+ return;
+
+ /*
+ * The kernel for some reason (2.6.32.9) starts a process context on
+ * one core and ends on another, so the swap in and swap out can be
+ * on different cores. If this happens, we need to stop the
+ * counters and collect the data on the core that started the counters,
+ * otherwise we receive invalid data. So we mark the core running
+ * the process as deferred. The next time a process is swapped in on
+ * the core that the process was running on, the counters will be
+ * updated.
+ */
+ if ((smp_processor_id() != p->running_cpu) && (p->pid != 0)) {
+ fake_swap_out[p->running_cpu] = 1;
+ return;
+ }
+
+ _SWAPS(p)++;
+ _CYCLES(p) += pm_get_cycle_count();
+
+ if (overflow & PM_CYCLE_OVERFLOW_MASK)
+ _CYCLES(p) += 0xFFFFFFFF;
+
+ for (i = 0; i < PERF_NUM_MONITORS; i++) {
+ _COUNTS(p, i) += pm_get_count(i);
+ if (overflow & (1 << i))
+ _COUNTS(p, i) += 0xFFFFFFFF;
+ }
+
+#ifdef CONFIG_ARCH_MSM8X60
+ _L2CYCLES(p) += l2_pm_get_cycle_count();
+ if (l2_overflow & L2_PM_CYCLE_OVERFLOW_MASK)
+ _L2CYCLES(p) += 0xFFFFFFFF;
+ for (i = 0; i < PERF_NUM_MONITORS; i++) {
+ _L2COUNTS(p, i) += l2_pm_get_count(i);
+ if (l2_overflow & (1 << i))
+ _L2COUNTS(p, i) += 0xFFFFFFFF;
+ }
+#endif
+}
+
+/*
+FUNCTION per_process_remove_manual
+
+DESCRIPTION
+ Remove an entry from the results directory if the flags allow this.
+ When not enabled or the entry is locked, the values/results will
+ not be removed.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void per_process_remove_manual(unsigned long pid)
+{
+ struct per_process_perf_mon_type *p = per_process_find(pid);
+
+ /*
+ * Check all of the flags to see if we can remove this one
+ * Then mark as not used
+ */
+ if (0 == p)
+ return;
+ p->pid = (0xFFFFFFFF);
+
+ /*
+ * Remove the proc entry.
+ */
+ if (p->proc)
+ remove_proc_entry(p->pidName, values_dir);
+ if (p->l2_proc)
+ remove_proc_entry(p->pidName, l2_results_dir);
+ kfree(p->pidName);
+
+ /*
+ * Clear them out...and ensure the pid is invalid
+ */
+ memset(p, 0, sizeof *p);
+ p->pid = 0xFFFFFFFF;
+ pm_remove_pid = -1;
+}
+
+/*
+* Remove called when a process exits...
+*/
+void _per_process_remove(unsigned long pid) {}
+
+/*
+FUNCTION per_process_initialize
+
+DESCRIPTION
+Initialize performance collection information for a new process.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+May create a new proc entry
+*/
+void per_process_initialize(struct per_process_perf_mon_type *p,
+ unsigned long pid)
+{
+ int i;
+
+ /*
+ * See if this is the pid we are interested in...
+ */
+ if (pp_settings_valid == -1)
+ return;
+ if ((pp_set_pid != pid) && (pp_set_pid != 0))
+ return;
+
+ /*
+ * Clear out the statistics table then insert this pid
+ * We want to keep the proc entry and the name
+ */
+ p->pid = pid;
+
+ /*
+ * Create a proc entry for this pid, then get the current event types and
+ * store in data struct so when the process is switched in we can track
+ * it.
+ */
+ if (p->proc == 0) {
+ per_process_create_results_proc(p);
+#ifdef CONFIG_ARCH_MSM8X60
+ per_process_create_l2_results_proc(p);
+#endif
+ }
+ _CYCLES(p) = 0;
+ _L2CYCLES(p) = 0;
+ _SWAPS(p) = 0;
+ /*
+ * Set the per process data struct, but not the monitors until later...
+ * Init only happens with the user sets the SetPID variable to this pid
+ * so we can load new values.
+ */
+ for (i = 0; i < PERF_NUM_MONITORS; i++) {
+ p->index[i] = per_proc_event[i];
+#ifdef CONFIG_ARCH_MSM8X60
+ p->l2_index[i] = l2_per_proc_event[i];
+#endif
+ _COUNTS(p, i) = 0;
+ _L2COUNTS(p, i) = 0;
+ }
+ p->lpm0evtyper = pp_lpm0evtyper;
+ p->lpm1evtyper = pp_lpm1evtyper;
+ p->lpm2evtyper = pp_lpm2evtyper;
+ p->l2lpmevtyper = pp_l2lpmevtyper;
+ p->vlpmevtyper = pp_vlpmevtyper;
+
+#ifdef CONFIG_ARCH_MSM8X60
+ p->l2pmevtyper0 = pp_l2pmevtyper0;
+ p->l2pmevtyper1 = pp_l2pmevtyper1;
+ p->l2pmevtyper2 = pp_l2pmevtyper2;
+ p->l2pmevtyper3 = pp_l2pmevtyper3;
+ p->l2pmevtyper4 = pp_l2pmevtyper4;
+#endif
+
+ /*
+ * Reset pid and settings value
+ */
+ pp_set_pid = -1;
+ pp_settings_valid = -1;
+}
+
+/*
+FUNCTION per_process_swap_in
+
+DESCRIPTION
+ Called when a context switch is about to start this PID.
+ We check to see if this process has an entry or not and create one
+ if not locked...
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void per_process_swap_in(struct per_process_perf_mon_type *p_new,
+ unsigned long pid)
+{
+ int i;
+
+ MARKPIDS('I', p_new->pid, 0);
+ /*
+ * If the set proc variable == the current pid then init a new
+ * entry...
+ */
+ if (pp_set_pid == pid)
+ per_process_initialize(p_new, pid);
+
+ p_new->running_cpu = smp_processor_id();
+ last_in_pid[smp_processor_id()] = pid;
+
+ /*
+ * setup the monitors for this process.
+ */
+ for (i = 0; i < PERF_NUM_MONITORS; i++) {
+ pm_set_event(i, p_new->index[i]);
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_set_event(i, p_new->l2_index[i]);
+#endif
+ }
+ pm_set_local_iu(p_new->lpm0evtyper);
+ pm_set_local_xu(p_new->lpm1evtyper);
+ pm_set_local_su(p_new->lpm2evtyper);
+ pm_set_local_l2(p_new->l2lpmevtyper);
+
+#ifdef CONFIG_ARCH_MSM8X60
+ pm_set_local_bu(p_new->l2pmevtyper0);
+ pm_set_local_cb(p_new->l2pmevtyper1);
+ pm_set_local_mp(p_new->l2pmevtyper2);
+ pm_set_local_sp(p_new->l2pmevtyper3);
+ pm_set_local_scu(p_new->l2pmevtyper4);
+#endif
+}
+
+/*
+FUNCTION _per_process_switch
+
+DESCRIPTION
+ Called during context switch. Updates the counts on the process about to
+ be swapped out and brings in the counters for the process about to be
+ swapped in.
+
+ All is dependent on the enabled and lock flags.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+
+DEFINE_SPINLOCK(pm_lock);
+void _per_process_switch(unsigned long old_pid, unsigned long new_pid)
+{
+ struct per_process_perf_mon_type *p_old, *p_new;
+
+ if (pm_global_enable == 0)
+ return;
+
+ spin_lock(&pm_lock);
+
+ pm_stop_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_stop_all();
+#endif
+
+ /*
+ * We detected that the process was swapped in on one core and out on
+ * a different core. This does not allow us to start and stop counters
+ * properly, so we need to defer processing. This checks to see if there
+ * is any deferred processing necessary, and does it... */
+ if (fake_swap_out[smp_processor_id()] != 0) {
+ fake_swap_out[smp_processor_id()] = 0;
+ p_old = per_process_find(last_in_pid[smp_processor_id()]);
+ last_in_pid[smp_processor_id()] = 0;
+ if (p_old != 0)
+ per_process_swap_out(p_old);
+ }
+
+ /*
+ * Clear the data collected so far for this process?
+ */
+ if (pp_clear_pid != -1) {
+ struct per_process_perf_mon_type *p_clear =
+ per_process_find(pp_clear_pid);
+ if (p_clear) {
+ memset(p_clear->cnts, 0,
+ sizeof(struct pm_counters_s)*num_possible_cpus());
+ printk(KERN_INFO "Clear Per Process Stats for PID:%ld\n",
+ pp_clear_pid);
+ pp_clear_pid = -1;
+ }
+ }
+ /*
+ * Always collect for 0, it collects for all.
+ */
+ if (pp_enabled) {
+ if (first_switch == 1) {
+ per_process_initialize(&perf_mons[0], 0);
+ first_switch = 0;
+ }
+ if (pm_global) {
+ per_process_swap_out(&perf_mons[0]);
+ per_process_swap_in(&perf_mons[0], 0);
+ } else {
+ p_old = per_process_find(old_pid);
+ p_new = per_process_find(new_pid);
+
+
+ /*
+ * save the old counts to the old data struct, if the
+ * returned ptr is NULL or the process id passed is not
+ * the same as the process id in the data struct then
+ * don't update the data.
+ */
+ if ((p_old) && (p_old->pid == old_pid) &&
+ (p_old->pid != 0)) {
+ per_process_swap_out(p_old);
+ }
+
+ /*
+ * Setup the counters for the new process
+ */
+ if (pp_set_pid == new_pid)
+ per_process_initialize(p_new, new_pid);
+ if ((p_new->pid == new_pid) && (new_pid != 0))
+ per_process_swap_in(p_new, new_pid);
+ }
+ pm_reset_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_reset_all();
+#endif
+#ifdef CONFIG_ARCH_QSD8X50
+ axi_swaps++;
+ if (pm_axi_info.refresh != 0 &&
+ axi_swaps % pm_axi_info.refresh == 0) {
+ if (pm_axi_info.clear == 1) {
+ pm_axi_clear_cnts();
+ pm_axi_info.clear = 0;
+ }
+ if (pm_axi_info.enable == 0)
+ pm_axi_disable();
+ else
+ pm_axi_update_cnts();
+ axi_swaps = 0;
+ }
+#endif
+ }
+ pm_start_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_start_all();
+#endif
+
+ spin_unlock(&pm_lock);
+}
+
+/*
+FUNCTION _perf_mon_interrupt_in
+
+DESCRIPTION
+ Called when an interrupt is being processed. If the pm_stop_for_interrupts
+ flag is non-zero then we disable the counting of performance monitors.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+static int pm_interrupt_nesting_count;
+static unsigned long pm_cycle_in, pm_cycle_out;
+void _perf_mon_interrupt_in(void)
+{
+ if (pm_global_enable == 0)
+ return;
+ if (pm_stop_for_interrupts == 0)
+ return;
+ pm_interrupt_nesting_count++; /* Atomic */
+ pm_stop_all();
+ pm_cycle_in = pm_get_cycle_count();
+}
+
+/*
+FUNCTION _perf_mon_interrupt_out
+
+DESCRIPTION
+ Re-enable performance monitor counting when the nesting count goes to zero,
+ provided the counting has been stopped.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void _perf_mon_interrupt_out(void)
+{
+ if (pm_global_enable == 0)
+ return;
+ if (pm_stop_for_interrupts == 0)
+ return;
+ --pm_interrupt_nesting_count; /* Atomic?? */
+
+ if (pm_interrupt_nesting_count <= 0) {
+ pm_cycle_out = pm_get_cycle_count();
+ if (pm_cycle_in != pm_cycle_out)
+ printk(KERN_INFO "pmIn!=pmOut in:%lx out:%lx\n",
+ pm_cycle_in, pm_cycle_out);
+ if (pp_enabled) {
+ pm_start_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_start_all();
+#endif
+ }
+ pm_interrupt_nesting_count = 0;
+ }
+}
+
+void per_process_do_global(unsigned long g)
+{
+ pm_global = g;
+
+ if (pm_global == 1) {
+ pm_stop_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_stop_all();
+#endif
+ pm_reset_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_reset_all();
+#endif
+ pp_set_pid = 0;
+ per_process_swap_in(&perf_mons[0], 0);
+ pm_start_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_start_all();
+#endif
+ } else {
+ pm_stop_all();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_stop_all();
+#endif
+ }
+}
+
+
+/*
+FUNCTION per_process_write
+
+DESCRIPTION
+ Generic routine to handle any of the settings /proc directory writes.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+int per_process_write(struct file *file, const char *buff,
+ unsigned long cnt, void *data, const char *fmt)
+{
+ char *newbuf;
+ unsigned long *d = (unsigned long *)data;
+
+ /*
+ * Allocate the user data in kernel space and then copy from user to kernel.
+ */
+ newbuf = kmalloc(cnt + 1, GFP_KERNEL);
+ if (0 == newbuf)
+ return PM_PP_ERR;
+ if (copy_from_user(newbuf, buff, cnt) != 0) {
+ printk(KERN_INFO "%s copy_from_user failed\n", __func__);
+ kfree(newbuf);
+ return cnt;
+ }
+ newbuf[cnt] = 0;
+ sscanf(newbuf, fmt, d);
+ kfree(newbuf);
+
+ /*
+ * If this is a remove command then do it now...
+ */
+ if (d == &pm_remove_pid)
+ per_process_remove_manual(*d);
+ if (d == &pm_global)
+ per_process_do_global(*d);
+ return cnt;
+}
+
+int per_process_write_dec(struct file *file, const char *buff,
+ unsigned long cnt, void *data)
+{
+ return per_process_write(file, buff, cnt, data, "%ld");
+}
+
+int per_process_write_hex(struct file *file, const char *buff,
+ unsigned long cnt, void *data)
+{
+ return per_process_write(file, buff, cnt, data, "%lx");
+}
+
+/*
+FUNCTION per_process_read
+
+DESCRIPTION
+ Generic read handler for the /proc settings directory.
+
+DEPENDENCIES
+
+RETURN VALUE
+ Number of characters to output.
+
+SIDE EFFECTS
+*/
+int per_process_read(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ unsigned long *d = (unsigned long *)data;
+ return sprintf(page, "%lx", *d);
+}
+
+int per_process_read_decimal(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ unsigned long *d = (unsigned long *)data;
+ return sprintf(page, "%ld", *d);
+}
+
+/*
+FUNCTION per_process_proc_entry
+
+DESCRIPTION
+ Create a generic entry for the /proc settings directory.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void per_process_proc_entry(char *name, unsigned long *var,
+ struct proc_dir_entry *d, int hex)
+{
+ struct proc_dir_entry *pe;
+
+ pe = create_proc_entry(name, 0777, d);
+ if (0 == pe)
+ return;
+ if (hex) {
+ pe->read_proc = per_process_read;
+ pe->write_proc = per_process_write_hex;
+ } else {
+ pe->read_proc = per_process_read_decimal;
+ pe->write_proc = per_process_write_dec;
+ }
+ pe->data = (void *)var;
+
+ if (pp_proc_entry_index >= PP_MAX_PROC_ENTRIES) {
+ printk(KERN_INFO "PERF: proc entry overflow, "
+ "memory leak on module unload will occur\n");
+ return;
+ }
+ per_process_proc_names[pp_proc_entry_index++] = name;
+}
+
+static int perfmon_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ static int old_pid = -1;
+ struct thread_info *thread = v;
+ int current_pid;
+
+ if (cmd != THREAD_NOTIFY_SWITCH)
+ return old_pid;
+
+ current_pid = thread->task->pid;
+ if (old_pid != -1)
+ _per_process_switch(old_pid, current_pid);
+ old_pid = current_pid;
+ return old_pid;
+}
+
+static struct notifier_block perfmon_notifier_block = {
+ .notifier_call = perfmon_notifier,
+};
+
+/*
+FUNCTION per_process_perf_init
+
+DESCRIPTION
+ Initialize the per process performance monitor variables and /proc space.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+int per_process_perf_init(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ smp_call_function_single(0, (void *)pm_initialize, (void *)NULL, 1);
+ smp_call_function_single(1, (void *)pm_initialize, (void *)NULL, 1);
+ l2_pm_initialize();
+#else
+ pm_initialize();
+#endif
+ pm_axi_init();
+ pm_axi_clear_cnts();
+ proc_dir = proc_mkdir("ppPerf", NULL);
+ values_dir = proc_mkdir("results", proc_dir);
+ settings_dir = proc_mkdir("settings", proc_dir);
+ per_process_proc_entry("enable", &pp_enabled, settings_dir, 1);
+ per_process_proc_entry("valid", &pp_settings_valid, settings_dir, 1);
+ per_process_proc_entry("setPID", &pp_set_pid, settings_dir, 0);
+ per_process_proc_entry("clearPID", &pp_clear_pid, settings_dir, 0);
+ per_process_proc_entry("event0", &per_proc_event[0], settings_dir, 1);
+ per_process_proc_entry("event1", &per_proc_event[1], settings_dir, 1);
+ per_process_proc_entry("event2", &per_proc_event[2], settings_dir, 1);
+ per_process_proc_entry("event3", &per_proc_event[3], settings_dir, 1);
+ per_process_proc_entry("l2_event0", &l2_per_proc_event[0], settings_dir,
+ 1);
+ per_process_proc_entry("l2_event1", &l2_per_proc_event[1], settings_dir,
+ 1);
+ per_process_proc_entry("l2_event2", &l2_per_proc_event[2], settings_dir,
+ 1);
+ per_process_proc_entry("l2_event3", &l2_per_proc_event[3], settings_dir,
+ 1);
+ per_process_proc_entry("debug", &dbg_flags, settings_dir, 1);
+ per_process_proc_entry("autolock", &pp_auto_lock, settings_dir, 1);
+ per_process_proc_entry("lpm0evtyper", &pp_lpm0evtyper, settings_dir, 1);
+ per_process_proc_entry("lpm1evtyper", &pp_lpm1evtyper, settings_dir, 1);
+ per_process_proc_entry("lpm2evtyper", &pp_lpm2evtyper, settings_dir, 1);
+ per_process_proc_entry("l2lpmevtyper", &pp_l2lpmevtyper, settings_dir,
+ 1);
+ per_process_proc_entry("vlpmevtyper", &pp_vlpmevtyper, settings_dir, 1);
+ per_process_proc_entry("l2pmevtyper0", &pp_l2pmevtyper0, settings_dir,
+ 1);
+ per_process_proc_entry("l2pmevtyper1", &pp_l2pmevtyper1, settings_dir,
+ 1);
+ per_process_proc_entry("l2pmevtyper2", &pp_l2pmevtyper2, settings_dir,
+ 1);
+ per_process_proc_entry("l2pmevtyper3", &pp_l2pmevtyper3, settings_dir,
+ 1);
+ per_process_proc_entry("l2pmevtyper4", &pp_l2pmevtyper4, settings_dir,
+ 1);
+ per_process_proc_entry("stopForInterrupts", &pm_stop_for_interrupts,
+ settings_dir, 1);
+ per_process_proc_entry("global", &pm_global, settings_dir, 1);
+ per_process_proc_entry("globalEnable", &pm_global_enable, settings_dir,
+ 1);
+ per_process_proc_entry("removePID", &pm_remove_pid, settings_dir, 0);
+
+ axi_dir = proc_mkdir("axi", proc_dir);
+ axi_settings_dir = proc_mkdir("settings", axi_dir);
+ axi_results_dir = proc_mkdir("results", axi_dir);
+ pm_axi_set_proc_entry("axi_enable", &pm_axi_info.enable,
+ axi_settings_dir, 1);
+ pm_axi_set_proc_entry("axi_clear", &pm_axi_info.clear, axi_settings_dir,
+ 0);
+ pm_axi_set_proc_entry("axi_valid", &pm_axi_info.valid, axi_settings_dir,
+ 1);
+ pm_axi_set_proc_entry("axi_sel_reg0", &pm_axi_info.sel_reg0,
+ axi_settings_dir, 1);
+ pm_axi_set_proc_entry("axi_sel_reg1", &pm_axi_info.sel_reg1,
+ axi_settings_dir, 1);
+ pm_axi_set_proc_entry("axi_ten_sel", &pm_axi_info.ten_sel_reg,
+ axi_settings_dir, 1);
+ pm_axi_set_proc_entry("axi_refresh", &pm_axi_info.refresh,
+ axi_settings_dir, 1);
+ pm_axi_get_cnt_proc_entry("axi_cnts", &axi_cnts, axi_results_dir, 0);
+ l2_dir = proc_mkdir("l2", proc_dir);
+ l2_results_dir = proc_mkdir("results", l2_dir);
+
+ memset(perf_mons, 0, sizeof(perf_mons));
+ per_process_create_results_proc(&perf_mons[0]);
+ per_process_create_l2_results_proc(&perf_mons[0]);
+ thread_register_notifier(&perfmon_notifier_block);
+ /*
+ * Set the function pointers so the module can be activated.
+ */
+ pp_interrupt_out_ptr = _perf_mon_interrupt_out;
+ pp_interrupt_in_ptr = _perf_mon_interrupt_in;
+ pp_process_remove_ptr = _per_process_remove;
+ pp_loaded = 1;
+ pm_axi_info.refresh = 1;
+
+#ifdef CONFIG_ARCH_MSM8X60
+ smp_call_function_single(0, (void *)pm_reset_all, (void *)NULL, 1);
+ smp_call_function_single(1, (void *)pm_reset_all, (void *)NULL, 1);
+ smp_call_function_single(0, (void *)l2_pm_reset_all, (void *)NULL, 1);
+ smp_call_function_single(1, (void *)l2_pm_reset_all, (void *)NULL, 1);
+#else
+ pm_reset_all();
+#endif
+
+ return 0;
+}
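+
+/*
+ * Resulting /proc layout (sketch):
+ *
+ *	/proc/ppPerf/settings/   enable, setPID, event0-3, l2_event0-3, ...
+ *	/proc/ppPerf/results/    per-process counter values
+ *	/proc/ppPerf/axi/        settings/ and results/ for the AXI monitor
+ *	/proc/ppPerf/l2/results/ L2 counter values
+ *
+ * A minimal global measurement, assuming the default setup above:
+ *
+ *	echo 3 > /proc/ppPerf/settings/event0    PM_EVT_L1_D_MISS on counter 0
+ *	echo 1 > /proc/ppPerf/settings/enable
+ *	echo 1 > /proc/ppPerf/settings/global
+ *	... run the workload, then read the files under /proc/ppPerf/results
+ */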
+
+/*
+FUNCTION per_process_perf_exit
+
+DESCRIPTION
+ Module exit function; clean up and remove the proc entries.
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+ Per-process performance monitoring is no longer available.
+*/
+void per_process_perf_exit(void)
+{
+ unsigned long i;
+ /*
+	 * Set the function pointers to 0 so the functions will no longer
+ * be invoked
+ */
+ pp_loaded = 0;
+ pp_interrupt_out_ptr = 0;
+ pp_interrupt_in_ptr = 0;
+ pp_process_remove_ptr = 0;
+ /*
+ * Remove the results
+ */
+ for (i = 0; i < PERF_MON_PROCESS_NUM; i++)
+ per_process_remove_manual(perf_mons[i].pid);
+ /*
+ * Remove the proc entries in the settings dir
+ */
+	for (i = 0; i < pp_proc_entry_index; i++)
+ remove_proc_entry(per_process_proc_names[i], settings_dir);
+
+ /*remove proc axi files*/
+ remove_proc_entry("axi_enable", axi_settings_dir);
+ remove_proc_entry("axi_valid", axi_settings_dir);
+ remove_proc_entry("axi_refresh", axi_settings_dir);
+ remove_proc_entry("axi_clear", axi_settings_dir);
+ remove_proc_entry("axi_sel_reg0", axi_settings_dir);
+ remove_proc_entry("axi_sel_reg1", axi_settings_dir);
+ remove_proc_entry("axi_ten_sel", axi_settings_dir);
+ remove_proc_entry("axi_cnts", axi_results_dir);
+ /*
+ * Remove the directories
+ */
+ remove_proc_entry("results", l2_dir);
+ remove_proc_entry("l2", proc_dir);
+ remove_proc_entry("results", proc_dir);
+ remove_proc_entry("settings", proc_dir);
+ remove_proc_entry("results", axi_dir);
+ remove_proc_entry("settings", axi_dir);
+ remove_proc_entry("axi", proc_dir);
+ remove_proc_entry("ppPerf", NULL);
+ pm_free_irq();
+#ifdef CONFIG_ARCH_MSM8X60
+ l2_pm_free_irq();
+#endif
+ thread_unregister_notifier(&perfmon_notifier_block);
+#ifdef CONFIG_ARCH_MSM8X60
+ smp_call_function_single(0, (void *)pm_deinitialize, (void *)NULL, 1);
+ smp_call_function_single(1, (void *)pm_deinitialize, (void *)NULL, 1);
+ l2_pm_deinitialize();
+#else
+ pm_deinitialize();
+#endif
+}
diff --git a/arch/arm/perfmon/per.c b/arch/arm/perfmon/per.c
new file mode 100644
index 0000000..4222844
--- /dev/null
+++ b/arch/arm/perfmon/per.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+per.c
+
+DESCRIPTION: Performance count interface for Linux via /proc, in the T32
+command file style.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/time.h>
+#include "linux/proc_fs.h"
+#include "linux/kernel_stat.h"
+#include "asm/uaccess.h"
+#include "cp15_registers.h"
+#include "perf.h"
+
+#define PM_PER_ERR -1
+/*
+FUNCTION per_init
+
+DESCRIPTION Initialize the proc interface for the performance data.
+*/
+static __init int per_init(void)
+{
+
+ if (atomic_read(&pm_op_lock) == 1) {
+ printk(KERN_INFO "Can not load KSAPI, monitors are in use\n");
+ return PM_PER_ERR;
+ }
+ atomic_set(&pm_op_lock, 1);
+ per_process_perf_init();
+ printk(KERN_INFO "ksapi init\n");
+ return 0;
+}
+
+static void __exit per_exit(void)
+{
+ per_process_perf_exit();
+ printk(KERN_INFO "ksapi exit\n");
+ atomic_set(&pm_op_lock, 0);
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(per_init);
+module_exit(per_exit);
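+
+/*
+ * Load/unload sketch: per_init() claims the monitors through pm_op_lock
+ * and refuses to load while another user already holds them:
+ *
+ *	insmod ksapi.ko    per_init() -> per_process_perf_init()
+ *	rmmod ksapi        per_exit() -> per_process_perf_exit()
+ */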
diff --git a/arch/arm/perfmon/perf-function-hooks.c b/arch/arm/perfmon/perf-function-hooks.c
new file mode 100644
index 0000000..aacc353
--- /dev/null
+++ b/arch/arm/perfmon/perf-function-hooks.c
@@ -0,0 +1,81 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+* perf-function-hooks.c
+* DESCRIPTION
+* Hooks for ksapi.ko
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/time.h>
+#include "linux/proc_fs.h"
+#include "linux/kernel_stat.h"
+#include "asm/uaccess.h"
+#include <linux/proc_fs.h>
+#include "cp15_registers.h"
+#include <asm/perftypes.h>
+#include "perf.h"
+
+/*
+* Function pointers that are filled in when the module is installed.
+* The single pp_loaded flag is tested in the functions below so the
+* hooks stay safe to call while the module is absent or being removed.
+*/
+VPVF pp_interrupt_out_ptr;
+VPVF pp_interrupt_in_ptr;
+VPULF pp_process_remove_ptr;
+unsigned int pp_loaded;
+EXPORT_SYMBOL(pp_loaded);
+atomic_t pm_op_lock;
+EXPORT_SYMBOL(pm_op_lock);
+
+/*
+FUNCTION VARIOUS
+
+DESCRIPTION
+Hooks to call into the module functions after they are loaded. The
+above pointers will be set and then these functions are ready to be
+called.
+
+DEPENDENCIES
+The per-process performance monitor module needs to be loaded...
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+void perf_mon_interrupt_out(void)
+{
+ if (pp_loaded)
+ (*pp_interrupt_out_ptr)();
+}
+EXPORT_SYMBOL(pp_interrupt_out_ptr);
+
+void perf_mon_interrupt_in(void)
+{
+ if (pp_loaded)
+ (*pp_interrupt_in_ptr)();
+}
+EXPORT_SYMBOL(pp_interrupt_in_ptr);
+
+void per_process_remove(unsigned long pid)
+{
+ if (pp_loaded)
+ (*pp_process_remove_ptr)(pid);
+}
+EXPORT_SYMBOL(pp_process_remove_ptr);
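+
+/*
+ * Hook wiring sketch: ksapi.ko points the pointers above at its own
+ * handlers and sets pp_loaded last (see per_process_perf_init()), so
+ * these built-in stubs fall through harmlessly until the module is
+ * fully initialized:
+ *
+ *	pp_interrupt_out_ptr = _perf_mon_interrupt_out;
+ *	pp_interrupt_in_ptr = _perf_mon_interrupt_in;
+ *	pp_process_remove_ptr = _per_process_remove;
+ *	pp_loaded = 1;
+ */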
diff --git a/arch/arm/perfmon/perf-smp.c b/arch/arm/perfmon/perf-smp.c
new file mode 100644
index 0000000..5417fc7
--- /dev/null
+++ b/arch/arm/perfmon/perf-smp.c
@@ -0,0 +1,751 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+perf-smp.c
+DESCRIPTION
+Manipulation and initialization of the ARMv7 MP (L2/SCU) performance counter registers.
+
+
+EXTERNALIZED FUNCTIONS
+
+INITIALIZATION AND SEQUENCING REQUIREMENTS
+*/
+
+/*
+INCLUDE FILES FOR MODULE
+*/
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "l2_cp15_registers.h"
+
+/*
+DEFINITIONS AND DECLARATIONS FOR MODULE
+
+This section contains definitions for constants, macros, types, variables
+and other items needed by this module.
+*/
+
+/*
+ Constant / Define Declarations
+*/
+
+#define PM_NUM_COUNTERS 4
+#define L2_PM_ERR -1
+
+/*------------------------------------------------------------------------
+ * Global control bits
+------------------------------------------------------------------------*/
+#define PM_L2_GLOBAL_ENABLE (1<<0)
+#define PM_L2_EVENT_RESET (1<<1)
+#define PM_L2_CYCLE_RESET (1<<2)
+#define PM_L2_CLKDIV (1<<3)
+#define PM_L2_GLOBAL_TRACE (1<<4)
+#define PM_L2_DISABLE_PROHIBIT (1<<5)
+
+/*---------------------------------------------------------------------------
+ * Enable and clear bits for each event/trigger
+----------------------------------------------------------------------------*/
+#define PM_L2EV0_ENABLE (1<<0)
+#define PM_L2EV1_ENABLE (1<<1)
+#define PM_L2EV2_ENABLE (1<<2)
+#define PM_L2EV3_ENABLE (1<<3)
+#define PM_L2_COUNT_ENABLE (1<<31)
+#define PM_L2_ALL_ENABLE (0x8000000F)
+
+
+/*-----------------------------------------------------------------------------
+ * Overflow actions
+------------------------------------------------------------------------------*/
+#define PM_L2_OVERFLOW_NOACTION (0)
+#define PM_L2_OVERFLOW_HALT (1)
+#define PM_L2_OVERFLOW_STOP (2)
+#define PM_L2_OVERFLOW_SKIP (3)
+
+/*
+ * Shifts for each trigger type
+ */
+#define PM_STOP_SHIFT 24
+#define PM_RELOAD_SHIFT 22
+#define PM_RESUME_SHIFT 20
+#define PM_SUSPEND_SHIFT 18
+#define PM_START_SHIFT 16
+#define PM_STOPALL_SHIFT 15
+#define PM_STOPCOND_SHIFT 12
+#define PM_RELOADCOND_SHIFT 9
+#define PM_RESUMECOND_SHIFT 6
+#define PM_SUSPENDCOND_SHIFT 3
+#define PM_STARTCOND_SHIFT 0
+
+
+/*---------------------------------------------------------------------------
+External control register. What to do when various events happen.
+Triggering events, etc.
+----------------------------------------------------------------------------*/
+#define PM_EXTTR0 0
+#define PM_EXTTR1 1
+#define PM_EXTTR2 2
+#define PM_EXTTR3 3
+
+#define PM_COND_NO_STOP 0
+#define PM_COND_STOP_CNTOVRFLW 1
+#define PM_COND_STOP_EXTERNAL 4
+#define PM_COND_STOP_TRACE 5
+#define PM_COND_STOP_EVOVRFLW 6
+#define PM_COND_STOP_EVTYPER 7
+
+/*--------------------------------------------------------------------------
+Protect against concurrent access. There is an index register that is
+used to select the appropriate bank of registers. If multiple processes
+are writing this at different times we could have a mess...
+---------------------------------------------------------------------------*/
+#define PM_LOCK()
+#define PM_UNLOCK()
+#define PRINT printk
+
+/*--------------------------------------------------------------------------
+The Event definitions
+--------------------------------------------------------------------------*/
+#define L2PM_EVT_PM0_EVT0 0x00
+#define L2PM_EVT_PM0_EVT1 0x01
+#define L2PM_EVT_PM0_EVT2 0x02
+#define L2PM_EVT_PM0_EVT3 0x03
+#define L2PM_EVT_PM1_EVT0 0x04
+#define L2PM_EVT_PM1_EVT1 0x05
+#define L2PM_EVT_PM1_EVT2 0x06
+#define L2PM_EVT_PM1_EVT3 0x07
+#define L2PM_EVT_PM2_EVT0 0x08
+#define L2PM_EVT_PM2_EVT1 0x09
+#define L2PM_EVT_PM2_EVT2 0x0a
+#define L2PM_EVT_PM2_EVT3 0x0b
+#define L2PM_EVT_PM3_EVT0 0x0c
+#define L2PM_EVT_PM3_EVT1 0x0d
+#define L2PM_EVT_PM3_EVT2 0x0e
+#define L2PM_EVT_PM3_EVT3 0x0f
+#define L2PM_EVT_PM4_EVT0 0x10
+#define L2PM_EVT_PM4_EVT1 0x11
+#define L2PM_EVT_PM4_EVT2 0x12
+#define L2PM_EVT_PM4_EVT3 0x13
+
+/*
+Type Declarations
+*/
+
+/*
+Local Object Definitions
+*/
+
+unsigned long l2_pm_cycle_overflow_count;
+unsigned long l2_pm_overflow_count[PM_NUM_COUNTERS];
+
+/*---------------------------------------------------------------------------
+Max number of events read from the config registers
+---------------------------------------------------------------------------*/
+static int pm_l2_max_events;
+
+static int irqid;
+
+/*
+Function Definitions
+*/
+
+/*
+FUNCTION pm_l2_group_stop
+
+DESCRIPTION Stop a group of the performance monitors. Event monitor 0 is bit
+0, event monitor 1 bit 1, etc. The cycle count can also be disabled with
+bit 31. Macros are provided for all of the indexes including an ALL.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+Stops the performance monitoring for the index passed.
+*/
+void pm_l2_group_stop(unsigned long mask)
+{
+ WCP15_L2PMCNTENCLR(mask);
+}
+
+/*
+FUNCTION pm_l2_group_start
+
+DESCRIPTION Start a group of the performance monitors. Event monitor 0 is bit
+0, event monitor 1 bit 1, etc. The cycle count can also be enabled with
+bit 31. Macros are provided for all of the indexes including an ALL.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+Starts the performance monitoring for the index passed.
+*/
+void pm_l2_group_start(unsigned long mask)
+{
+ WCP15_L2PMCNTENSET(mask);
+}
+
+/*
+FUNCTION l2_pm_get_overflow
+
+DESCRIPTION Return the overflow condition for the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+0 no overflow
+!0 (anything else) overflow;
+
+SIDE EFFECTS
+*/
+unsigned long l2_pm_get_overflow(int index)
+{
+ unsigned long overflow = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_l2_max_events)
+ return L2_PM_ERR;
+ RCP15_L2PMOVSR(overflow);
+
+ return overflow & (1<<index);
+}
+
+/*
+FUNCTION l2_pm_get_cycle_overflow
+
+DESCRIPTION
+Returns if the cycle counter has overflowed or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+0 no overflow
+!0 (anything else) overflow;
+
+SIDE EFFECTS
+*/
+unsigned long l2_pm_get_cycle_overflow(void)
+{
+ unsigned long overflow = 0;
+
+ RCP15_L2PMOVSR(overflow);
+ return overflow & PM_L2_COUNT_ENABLE;
+}
+
+/*
+FUNCTION l2_pm_reset_overflow
+
+DESCRIPTION Reset the overflow bit for the event counter index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+*/
+void l2_pm_reset_overflow(int index)
+{
+ WCP15_L2PMOVSR(1<<index);
+}
+
+/*
+FUNCTION l2_pm_reset_cycle_overflow
+
+DESCRIPTION Reset the cycle counter overflow bit.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+*/
+void l2_pm_reset_cycle_overflow(void)
+{
+ WCP15_L2PMOVSR(PM_L2_COUNT_ENABLE);
+}
+
+/*
+FUNCTION l2_pm_get_cycle_count
+
+DESCRIPTION return the count in the cycle count register.
+
+DEPENDENCIES
+
+RETURN VALUE
+The value in the cycle count register.
+
+SIDE EFFECTS
+*/
+unsigned long l2_pm_get_cycle_count(void)
+{
+ unsigned long cnt = 0;
+ RCP15_L2PMCCNTR(cnt);
+ return cnt;
+}
+
+/*
+FUNCTION l2_pm_reset_cycle_count
+
+DESCRIPTION reset the value in the cycle count register
+
+DEPENDENCIES
+
+RETURN VALUE
+NONE
+
+SIDE EFFECTS
+Resets the performance monitor cycle count register.
+Any interrupts period based on this overflow will be changed
+*/
+void l2_pm_reset_cycle_count(void)
+{
+ WCP15_L2PMCNTENCLR(PM_L2_COUNT_ENABLE);
+}
+
+/*
+FUNCTION l2_pm_cycle_div_64
+
+DESCRIPTION Set the cycle counter to count every 64th cycle instead of
+every cycle when the value passed is 1, otherwise counts every cycle.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+Changes the rate at which cycles are counted. Anything that is reading
+the cycle count (l2_pm_get_cycle_count) may get different results.
+*/
+void l2_pm_cycle_div_64(int enable)
+{
+ unsigned long enables = 0;
+
+ RCP15_L2PMCR(enables);
+ if (enable)
+ WCP15_L2PMCR(enables | PM_L2_CLKDIV);
+ else
+ WCP15_L2PMCR(enables & ~PM_L2_CLKDIV);
+}
+
+/*
+FUNCTION l2_pm_enable_cycle_counter
+
+DESCRIPTION Enable the cycle counter. Sets the bit in the enable register
+so the performance monitor counter starts up counting.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+*/
+void l2_pm_enable_cycle_counter(void)
+{
+/*
+* Enable the counter.
+*/
+ WCP15_L2PMCNTENSET(PM_L2_COUNT_ENABLE);
+}
+
+/*
+FUNCTION l2_pm_disable_counter
+
+DESCRIPTION Disable a single counter based on the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+Any triggers that are based on the stopped counter may not trigger...
+*/
+void l2_pm_disable_counter(int index)
+{
+ /*
+ * Range check
+ */
+ if (index > pm_l2_max_events)
+ return;
+ WCP15_L2PMCNTENCLR(1<<index);
+}
+
+/*
+FUNCTION l2_pm_enable_counter
+
+DESCRIPTION Enable the counter with the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+none.
+
+SIDE EFFECTS
+*/
+void l2_pm_enable_counter(int index)
+{
+ /*
+ * Range check
+ */
+ if (index > pm_l2_max_events)
+ return;
+ WCP15_L2PMCNTENSET(1<<index);
+}
+
+/*
+FUNCTION l2_pm_set_count
+
+DESCRIPTION Set the event count for the counter at the index passed;
+used for resets.
+
+DEPENDENCIES
+
+RETURN VALUE
+-1 if the index is out of range
+
+SIDE EFFECTS
+*/
+int l2_pm_set_count(int index, unsigned long new_value)
+{
+ unsigned long reg = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_l2_max_events)
+ return L2_PM_ERR;
+
+/*
+* Lock, select the index and write the count...unlock
+*/
+ PM_LOCK();
+ WCP15_L2PMSELR(index);
+ WCP15_L2PMXEVCNTR(new_value);
+ PM_UNLOCK();
+ return reg;
+}
+
+int l2_pm_reset_count(int index)
+{
+ return l2_pm_set_count(index, 0);
+}
+
+/*
+FUNCTION l2_pm_get_count
+
+DESCRIPTION Return the number of events that have happened for the index
+passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+-1 if the index is out of range
+The number of events if in range
+
+SIDE EFFECTS
+*/
+unsigned long l2_pm_get_count(int index)
+{
+ unsigned long reg = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_l2_max_events)
+ return L2_PM_ERR;
+
+/*
+* Lock, select the index and read the count...unlock
+*/
+ PM_LOCK();
+ WCP15_L2PMSELR(index);
+ RCP15_L2PMXEVCNTR(reg);
+ PM_UNLOCK();
+ return reg;
+}
+
+unsigned long get_filter_code(unsigned long event)
+{
+ if (event == 0x0 || event == 0x4 || event == 0x08
+ || event == 0x0c || event == 0x10)
+ return 0x0001003f;
+ else if (event == 0x1 || event == 0x5 || event == 0x09
+ || event == 0x0d || event == 0x11)
+ return 0x0002003f;
+ else if (event == 0x2 || event == 0x6 || event == 0x0a
+ || event == 0x0e || event == 0x12)
+ return 0x0004003f;
+ else if (event == 0x3 || event == 0x7 || event == 0x0b
+ || event == 0x0f || event == 0x13)
+ return 0x0008003f;
+ else
+ return 0;
+}
+
+int l2_pm_set_event(int index, unsigned long event)
+{
+ unsigned long reg = 0;
+
+ /*
+ * Range check
+ */
+ if (index > pm_l2_max_events)
+ return L2_PM_ERR;
+
+ /*
+	 * Lock, select the index and set the event type and filter...unlock
+ */
+ PM_LOCK();
+ WCP15_L2PMSELR(index);
+ WCP15_L2PMXEVTYPER(event);
+ /* WCP15_L2PMXEVFILTER(get_filter_code(event)); */
+ WCP15_L2PMXEVFILTER(0x000f003f);
+ PM_UNLOCK();
+ return reg;
+}
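+
+/*
+ * Usage sketch for a single L2 counter, assuming the counters are
+ * currently stopped (event codes are the L2PM_EVT_* values above):
+ *
+ *	l2_pm_set_event(0, L2PM_EVT_PM0_EVT0);
+ *	l2_pm_reset_count(0);
+ *	pm_l2_group_start(PM_L2EV0_ENABLE | PM_L2_COUNT_ENABLE);
+ *	... workload ...
+ *	pm_l2_group_stop(PM_L2EV0_ENABLE | PM_L2_COUNT_ENABLE);
+ *	unsigned long count = l2_pm_get_count(0);
+ */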
+
+/*
+FUNCTION pm_set_local_bu
+
+DESCRIPTION Set the local BU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_bu(unsigned long value)
+{
+ WCP15_L2PMEVTYPER0(value);
+}
+
+/*
+FUNCTION pm_set_local_cb
+
+DESCRIPTION Set the local CB triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_cb(unsigned long value)
+{
+ WCP15_L2PMEVTYPER1(value);
+}
+
+/*
+FUNCTION pm_set_local_mp
+
+DESCRIPTION Set the local MP triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_mp(unsigned long value)
+{
+ WCP15_L2PMEVTYPER2(value);
+}
+
+/*
+FUNCTION pm_set_local_sp
+
+DESCRIPTION Set the local SP triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_sp(unsigned long value)
+{
+ WCP15_L2PMEVTYPER3(value);
+}
+
+/*
+FUNCTION pm_set_local_scu
+
+DESCRIPTION Set the local SCU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_scu(unsigned long value)
+{
+ WCP15_L2PMEVTYPER4(value);
+}
+
+/*
+FUNCTION l2_pm_isr
+
+DESCRIPTION:
+ Performance Monitor interrupt service routine to capture overflows
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+static irqreturn_t l2_pm_isr(int irq, void *d)
+{
+ int i;
+
+ for (i = 0; i < PM_NUM_COUNTERS; i++) {
+ if (l2_pm_get_overflow(i)) {
+ l2_pm_overflow_count[i]++;
+ l2_pm_reset_overflow(i);
+ }
+ }
+
+ if (l2_pm_get_cycle_overflow()) {
+ l2_pm_cycle_overflow_count++;
+ l2_pm_reset_cycle_overflow();
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+void l2_pm_stop_all(void)
+{
+ WCP15_L2PMCNTENCLR(0xFFFFFFFF);
+}
+
+void l2_pm_reset_all(void)
+{
+ WCP15_L2PMCR(0xF);
+ WCP15_L2PMOVSR(PM_L2_ALL_ENABLE); /* overflow clear */
+}
+
+void l2_pm_start_all(void)
+{
+ WCP15_L2PMCNTENSET(PM_L2_ALL_ENABLE);
+}
+
+/*
+FUNCTION l2_pm_initialize
+
+DESCRIPTION Initialize the L2/SCU performance monitoring for the v7 MP processor.
+ Ensures the cycle count is running and the event counters are enabled.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void l2_pm_initialize(void)
+{
+ unsigned long reg = 0;
+ unsigned char imp;
+ unsigned char id;
+ unsigned char num;
+ unsigned long enables = 0;
+ static int initialized;
+
+ if (initialized)
+ return;
+ initialized = 1;
+
+ irqid = SC_SICL2PERFMONIRPTREQ;
+ RCP15_L2PMCR(reg);
+ imp = (reg>>24) & 0xFF;
+ id = (reg>>16) & 0xFF;
+ pm_l2_max_events = num = (reg>>11) & 0xFF;
+ PRINT("V7 MP L2SCU Performance Monitor Capabilities\n");
+ PRINT(" Implementor %c(%d)\n", imp, imp);
+ PRINT(" Id %d %x\n", id, id);
+ PRINT(" Num Events %d %x\n", num, num);
+ PRINT("\nCycle counter enabled by default...\n");
+
+ /*
+ * Global enable, ensure the global enable is set so all
+ * subsequent actions take effect. Also resets the counts
+ */
+ RCP15_L2PMCR(enables);
+ WCP15_L2PMCR(enables | PM_L2_GLOBAL_ENABLE | PM_L2_EVENT_RESET |
+ PM_L2_CYCLE_RESET | PM_L2_CLKDIV);
+
+ /*
+ * Enable access from user space
+ */
+
+ /*
+	 * Install the interrupt handler and then enable the interrupts
+ */
+ l2_pm_reset_cycle_overflow();
+ l2_pm_reset_overflow(0);
+ l2_pm_reset_overflow(1);
+ l2_pm_reset_overflow(2);
+ l2_pm_reset_overflow(3);
+ l2_pm_reset_overflow(4);
+
+ if (0 != request_irq(irqid, l2_pm_isr, 0, "l2perfmon", 0))
+ printk(KERN_ERR "%s:%d request_irq returned error\n",
+ __FILE__, __LINE__);
+ WCP15_L2PMINTENSET(PM_L2_ALL_ENABLE);
+ /*
+ * Enable the cycle counter. Default, count 1:1 no divisor.
+ */
+ l2_pm_enable_cycle_counter();
+
+}
+
+void l2_pm_free_irq(void)
+{
+ free_irq(irqid, 0);
+}
+
+void l2_pm_deinitialize(void)
+{
+ unsigned long enables = 0;
+ RCP15_L2PMCR(enables);
+ WCP15_L2PMCR(enables & ~PM_L2_GLOBAL_ENABLE);
+}
+
diff --git a/arch/arm/perfmon/perf-v7.c b/arch/arm/perfmon/perf-v7.c
new file mode 100644
index 0000000..614eedc
--- /dev/null
+++ b/arch/arm/perfmon/perf-v7.c
@@ -0,0 +1,1009 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+perf-v7.c
+DESCRIPTION
+Manipulation and initialization of the ARMv7 performance counter registers.
+
+
+EXTERNALIZED FUNCTIONS
+
+INITIALIZATION AND SEQUENCING REQUIREMENTS
+*/
+
+/*
+INCLUDE FILES FOR MODULE
+*/
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "cp15_registers.h"
+
+/*
+DEFINITIONS AND DECLARATIONS FOR MODULE
+
+This section contains definitions for constants, macros, types, variables
+and other items needed by this module.
+*/
+
+/*
+ Constant / Define Declarations
+*/
+
+#define PM_NUM_COUNTERS 4
+#define PM_V7_ERR -1
+
+/*------------------------------------------------------------------------
+ * Global control bits
+------------------------------------------------------------------------*/
+#define PM_GLOBAL_ENABLE (1<<0)
+#define PM_EVENT_RESET (1<<1)
+#define PM_CYCLE_RESET (1<<2)
+#define PM_CLKDIV (1<<3)
+#define PM_GLOBAL_TRACE (1<<4)
+#define PM_DISABLE_PROHIBIT (1<<5)
+
+/*---------------------------------------------------------------------------
+ * Enable and clear bits for each event/trigger
+----------------------------------------------------------------------------*/
+#define PM_EV0_ENABLE (1<<0)
+#define PM_EV1_ENABLE (1<<1)
+#define PM_EV2_ENABLE (1<<2)
+#define PM_EV3_ENABLE (1<<3)
+#define PM_COUNT_ENABLE (1<<31)
+#define PM_ALL_ENABLE (0x8000000F)
+
+
+/*-----------------------------------------------------------------------------
+ * Overflow actions
+------------------------------------------------------------------------------*/
+#define PM_OVERFLOW_NOACTION (0)
+#define PM_OVERFLOW_HALT (1)
+#define PM_OVERFLOW_STOP (2)
+#define PM_OVERFLOW_SKIP (3)
+
+/*
+ * Shifts for each trigger type
+ */
+#define PM_STOP_SHIFT 24
+#define PM_RELOAD_SHIFT 22
+#define PM_RESUME_SHIFT 20
+#define PM_SUSPEND_SHIFT 18
+#define PM_START_SHIFT 16
+#define PM_STOPALL_SHIFT 15
+#define PM_STOPCOND_SHIFT 12
+#define PM_RELOADCOND_SHIFT 9
+#define PM_RESUMECOND_SHIFT 6
+#define PM_SUSPENDCOND_SHIFT 3
+#define PM_STARTCOND_SHIFT 0
+
+
+/*---------------------------------------------------------------------------
+External control register. What to do when various events happen.
+Triggering events, etc.
+----------------------------------------------------------------------------*/
+#define PM_EXTTR0 0
+#define PM_EXTTR1 1
+#define PM_EXTTR2 2
+#define PM_EXTTR3 3
+
+#define PM_COND_NO_STOP 0
+#define PM_COND_STOP_CNTOVRFLW 1
+#define PM_COND_STOP_EXTERNAL 4
+#define PM_COND_STOP_TRACE 5
+#define PM_COND_STOP_EVOVRFLW 6
+#define PM_COND_STOP_EVTYPER 7
+
+/*--------------------------------------------------------------------------
+Protect against concurrent access. There is an index register that is
+used to select the appropriate bank of registers. If multiple processes
+are writing this at different times we could have a mess...
+---------------------------------------------------------------------------*/
+#define PM_LOCK()
+#define PM_UNLOCK()
+#define PRINT printk
+
+/*--------------------------------------------------------------------------
+The Event definitions
+--------------------------------------------------------------------------*/
+#define PM_EVT_SW_INCREMENT 0
+#define PM_EVT_L1_I_MISS 1
+#define PM_EVT_ITLB_MISS 2
+#define PM_EVT_L1_D_MISS 3
+#define PM_EVT_L1_D_ACCESS 4
+#define PM_EVT_DTLB_MISS 5
+#define PM_EVT_DATA_READ 6
+#define PM_EVT_DATA_WRITE 7
+#define PM_EVT_INSTRUCTION 8
+#define PM_EVT_EXCEPTIONS 9
+#define PM_EVT_EXCEPTION_RET 10
+#define PM_EVT_CTX_CHANGE 11
+#define PM_EVT_PC_CHANGE 12
+#define PM_EVT_BRANCH 13
+#define PM_EVT_RETURN 14
+#define PM_EVT_UNALIGNED 15
+#define PM_EVT_BRANCH_MISS 16
+#define PM_EVT_EXTERNAL0 0x40
+#define PM_EVT_EXTERNAL1 0x41
+#define PM_EVT_EXTERNAL2 0x42
+#define PM_EVT_EXTERNAL3 0x43
+#define PM_EVT_TRACE0 0x44
+#define PM_EVT_TRACE1 0x45
+#define PM_EVT_TRACE2 0x46
+#define PM_EVT_TRACE3 0x47
+#define PM_EVT_PM0 0x48
+#define PM_EVT_PM1 0x49
+#define PM_EVT_PM2 0x4a
+#define PM_EVT_PM3 0x4b
+#define PM_EVT_LPM0_EVT0 0x4c
+#define PM_EVT_LPM0_EVT1 0x4d
+#define PM_EVT_LPM0_EVT2 0x4e
+#define PM_EVT_LPM0_EVT3 0x4f
+#define PM_EVT_LPM1_EVT0 0x50
+#define PM_EVT_LPM1_EVT1 0x51
+#define PM_EVT_LPM1_EVT2 0x52
+#define PM_EVT_LPM1_EVT3 0x53
+#define PM_EVT_LPM2_EVT0 0x54
+#define PM_EVT_LPM2_EVT1 0x55
+#define PM_EVT_LPM2_EVT2 0x56
+#define PM_EVT_LPM2_EVT3 0x57
+#define PM_EVT_L2_EVT0 0x58
+#define PM_EVT_L2_EVT1 0x59
+#define PM_EVT_L2_EVT2 0x5a
+#define PM_EVT_L2_EVT3 0x5b
+#define PM_EVT_VLP_EVT0 0x5c
+#define PM_EVT_VLP_EVT1 0x5d
+#define PM_EVT_VLP_EVT2 0x5e
+#define PM_EVT_VLP_EVT3 0x5f
+
+/*
+Type Declarations
+*/
+
+/*--------------------------------------------------------------------------
+A performance monitor trigger setup/initialization structure. Contains
+all of the fields necessary to setup a complex trigger with the internal
+performance monitor.
+---------------------------------------------------------------------------*/
+struct pm_trigger_s {
+ int index;
+ int event_type;
+ bool interrupt;
+ bool overflow_enable;
+ bool event_export;
+ unsigned char overflow_action;
+ unsigned char stop_index;
+ unsigned char reload_index;
+ unsigned char resume_index;
+ unsigned char suspend_index;
+ unsigned char start_index;
+ bool overflow_stop;
+ unsigned char stop_condition;
+ unsigned char reload_condition;
+ unsigned char resume_condition;
+ unsigned char suspend_condition;
+ unsigned char start_condition;
+};
+
+/*
+* Name and index place holder so we can display the event
+*/
+struct pm_name_s {
+ unsigned long index;
+ char *name;
+};
+
+/*
+Local Object Definitions
+*/
+
+unsigned long pm_cycle_overflow_count;
+unsigned long pm_overflow_count[PM_NUM_COUNTERS];
+
+/*---------------------------------------------------------------------------
+Max number of events read from the config registers
+---------------------------------------------------------------------------*/
+static int pm_max_events;
+
+/*--------------------------------------------------------------------------
+Storage area for each of the triggers
+*---------------------------------------------------------------------------*/
+static struct pm_trigger_s pm_triggers[4];
+
+/*--------------------------------------------------------------------------
+Names and indexes of the events
+--------------------------------------------------------------------------*/
+static struct pm_name_s pm_names[] = {
+ { PM_EVT_SW_INCREMENT, "SW Increment"},
+ { PM_EVT_L1_I_MISS, "L1 I MISS"},
+ { PM_EVT_ITLB_MISS, "L1 ITLB MISS"},
+ { PM_EVT_L1_D_MISS, "L1 D MISS"},
+ { PM_EVT_L1_D_ACCESS, "L1 D ACCESS"},
+ { PM_EVT_DTLB_MISS, "DTLB MISS"},
+ { PM_EVT_DATA_READ, "DATA READ"},
+ { PM_EVT_DATA_WRITE, "DATA WRITE"},
+ { PM_EVT_INSTRUCTION, "INSTRUCTIONS"},
+ { PM_EVT_EXCEPTIONS, "EXCEPTIONS"},
+ { PM_EVT_EXCEPTION_RET, "EXCEPTION RETURN"},
+ { PM_EVT_CTX_CHANGE, "CTX CHANGE"},
+ { PM_EVT_PC_CHANGE, "PC CHANGE"},
+ { PM_EVT_BRANCH, "BRANCH"},
+ { PM_EVT_RETURN, "RETURN"},
+ { PM_EVT_UNALIGNED, "UNALIGNED"},
+ { PM_EVT_BRANCH_MISS, "BRANCH MISS"},
+ { PM_EVT_EXTERNAL0, "EXTERNAL 0"},
+ { PM_EVT_EXTERNAL1, "EXTERNAL 1"},
+ { PM_EVT_EXTERNAL2, "EXTERNAL 2"},
+ { PM_EVT_EXTERNAL3, "EXTERNAL 3"},
+ { PM_EVT_TRACE0, "TRACE 0"},
+ { PM_EVT_TRACE1, "TRACE 1"},
+ { PM_EVT_TRACE2, "TRACE 2"},
+ { PM_EVT_TRACE3, "TRACE 3"},
+ { PM_EVT_PM0, "PM0"},
+ { PM_EVT_PM1, "PM1"},
+ { PM_EVT_PM2, "PM2"},
+ { PM_EVT_PM3, "PM3"},
+ { PM_EVT_LPM0_EVT0, "LPM0 E0"},
+ { PM_EVT_LPM0_EVT1, "LPM0 E1"},
+ { PM_EVT_LPM0_EVT2 , "LPM0 E2"},
+ { PM_EVT_LPM0_EVT3, "LPM0 E3"},
+ { PM_EVT_LPM1_EVT0, "LPM1 E0"},
+ { PM_EVT_LPM1_EVT1, "LPM1 E1"},
+ { PM_EVT_LPM1_EVT2, "LPM1 E2"},
+ { PM_EVT_LPM1_EVT3, "LPM1 E3"},
+ { PM_EVT_LPM2_EVT0, "LPM2 E0"},
+ { PM_EVT_LPM2_EVT1 , "LPM2 E1"},
+ { PM_EVT_LPM2_EVT2, "LPM2 E2"},
+ { PM_EVT_LPM2_EVT3, "LPM2 E3"},
+ { PM_EVT_L2_EVT0 , "L2 E0"},
+ { PM_EVT_L2_EVT1, "L2 E1"},
+ { PM_EVT_L2_EVT2, "L2 E2"},
+ { PM_EVT_L2_EVT3 , "L2 E3"},
+ { PM_EVT_VLP_EVT0 , "VLP E0"},
+ { PM_EVT_VLP_EVT1, "VLP E1"},
+ { PM_EVT_VLP_EVT2, "VLP E2"},
+	{ PM_EVT_VLP_EVT3, "VLP E3"},
+	/* Terminator required by the pm_find_event_name() lookup loop. */
+	{ (unsigned long)-1, "END OF LIST"},
+};
+
+static int irqid;
+
+/*
+Function Definitions
+*/
+
+/*
+FUNCTION pm_find_event_name
+
+DESCRIPTION Find the name associated with the event index passed and return
+the pointer.
+
+DEPENDENCIES
+
+RETURN VALUE
+Pointer to text string containing the name of the event or pointer to
+an error string. Either way access to the returned string will not
+cause an access error.
+
+SIDE EFFECTS
+*/
+char *pm_find_event_name(unsigned long index)
+{
+ unsigned long i = 0;
+
+ while (pm_names[i].index != -1) {
+ if (pm_names[i].index == index)
+ return pm_names[i].name;
+ i++;
+ }
+ return "BAD INDEX";
+}
+
+/*
+FUNCTION pm_group_stop
+
+DESCRIPTION Stop a group of the performance monitors. Event monitor 0 is bit
+0, event monitor 1 bit 1, etc. The cycle count can also be disabled with
+bit 31. Macros are provided for all of the indexes including an ALL.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+Stops the performance monitoring for the index passed.
+*/
+void pm_group_stop(unsigned long mask)
+{
+ WCP15_PMCNTENCLR(mask);
+}
+
+/*
+FUNCTION pm_group_start
+
+DESCRIPTION Start a group of the performance monitors. Event monitor 0 is bit
+0, event monitor 1 bit 1, etc. The cycle count can also be enabled with
+bit 31. Macros are provided for all of the indexes including an ALL.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+Starts the performance monitoring for the index passed.
+*/
+void pm_group_start(unsigned long mask)
+{
+ WCP15_PMCNTENSET(mask);
+}
+
+/*
+FUNCTION pm_cycle_overflow_action
+
+DESCRIPTION Action to take for an overflow of the cycle counter.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+Modify the state actions for overflow
+*/
+void pm_cycle_overflow_action(int action)
+{
+ unsigned long reg = 0;
+
+ if ((action > PM_OVERFLOW_SKIP) || (action < 0))
+ return;
+
+ RCP15_PMACTLR(reg);
+ reg &= ~(1<<30); /*clear it*/
+ WCP15_PMACTLR(reg | (action<<30));
+}
+
+/*
+FUNCTION pm_get_overflow
+
+DESCRIPTION Return the overflow condition for the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+0 no overflow
+!0 (anything else) overflow;
+
+SIDE EFFECTS
+*/
+unsigned long pm_get_overflow(int index)
+{
+ unsigned long overflow = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_max_events)
+ return PM_V7_ERR;
+ RCP15_PMOVSR(overflow);
+
+ return overflow & (1<<index);
+}
+
+/*
+FUNCTION pm_get_cycle_overflow
+
+DESCRIPTION
+Returns if the cycle counter has overflowed or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+0 no overflow
+!0 (anything else) overflow;
+
+SIDE EFFECTS
+*/
+unsigned long pm_get_cycle_overflow(void)
+{
+ unsigned long overflow = 0;
+
+ RCP15_PMOVSR(overflow);
+ return overflow & PM_COUNT_ENABLE;
+}
+
+/*
+FUNCTION pm_reset_overflow
+
+DESCRIPTION Reset the overflow bit for the event counter index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+*/
+void pm_reset_overflow(int index)
+{
+ WCP15_PMOVSR(1<<index);
+}
+
+/*
+FUNCTION pm_reset_cycle_overflow
+
+DESCRIPTION Reset the cycle counter overflow bit.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+*/
+void pm_reset_cycle_overflow(void)
+{
+ WCP15_PMOVSR(PM_COUNT_ENABLE);
+}
+
+/*
+FUNCTION pm_get_cycle_count
+
+DESCRIPTION return the count in the cycle count register.
+
+DEPENDENCIES
+
+RETURN VALUE
+The value in the cycle count register.
+
+SIDE EFFECTS
+*/
+unsigned long pm_get_cycle_count(void)
+{
+ unsigned long cnt = 0;
+ RCP15_PMCCNTR(cnt);
+ return cnt;
+}
+
+/*
+FUNCTION pm_reset_cycle_count
+
+DESCRIPTION reset the value in the cycle count register
+
+DEPENDENCIES
+
+RETURN VALUE
+NONE
+
+SIDE EFFECTS
+Resets the performance monitor cycle count register.
+Any interrupts period based on this overflow will be changed
+*/
+void pm_reset_cycle_count(void)
+{
+ WCP15_PMCNTENCLR(PM_COUNT_ENABLE);
+}
+
+/*
+FUNCTION pm_cycle_div_64
+
+DESCRIPTION Set the cycle counter to count every 64th cycle instead of
+every cycle when the value passed is 1, otherwise counts every cycle.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+Changes the rate at which cycles are counted. Anything that is reading
+the cycle count (pm_get_cycle_count) may get different results.
+*/
+void pm_cycle_div_64(int enable)
+{
+ unsigned long enables = 0;
+
+ RCP15_PMCR(enables);
+ if (enable)
+ WCP15_PMCR(enables | PM_CLKDIV);
+ else
+ WCP15_PMCR(enables & ~PM_CLKDIV);
+}
+
+/*
+FUNCTION pm_enable_cycle_counter
+
+DESCRIPTION Enable the cycle counter. Sets the bit in the enable register
+so the performance monitor counter starts up counting.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+*/
+void pm_enable_cycle_counter(void)
+{
+/*
+* Enable the counter.
+*/
+ WCP15_PMCNTENSET(PM_COUNT_ENABLE);
+}
+
+/*
+FUNCTION pm_disable_counter
+
+DESCRIPTION Disable a single counter based on the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+none
+
+SIDE EFFECTS
+Any triggers that are based on the stopped counter may not trigger...
+*/
+void pm_disable_counter(int index)
+{
+ /*
+ * Range check
+ */
+ if (index > pm_max_events)
+ return;
+ WCP15_PMCNTENCLR(1<<index);
+}
+
+/*
+FUNCTION pm_enable_counter
+
+DESCRIPTION Enable the counter with the index passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+none.
+
+SIDE EFFECTS
+*/
+void pm_enable_counter(int index)
+{
+ /*
+ * Range check
+ */
+ if (index > pm_max_events)
+ return;
+ WCP15_PMCNTENSET(1<<index);
+}
+
+/*
+FUNCTION pm_set_count
+
+DESCRIPTION Set the event count for the counter at the index passed;
+used for resets.
+
+DEPENDENCIES
+
+RETURN VALUE
+-1 if the index is out of range
+
+SIDE EFFECTS
+*/
+int pm_set_count(int index, unsigned long new_value)
+{
+ unsigned long reg = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_max_events)
+ return PM_V7_ERR;
+
+/*
+* Lock, select the index and write the count...unlock
+*/
+ PM_LOCK();
+ WCP15_PMSELR(index);
+ WCP15_PMXEVCNTR(new_value);
+ PM_UNLOCK();
+ return reg;
+}
+
+int pm_reset_count(int index)
+{
+ return pm_set_count(index, 0);
+}
+
+/*
+FUNCTION pm_get_count
+
+DESCRIPTION Return the number of events that have happened for the index
+passed.
+
+DEPENDENCIES
+
+RETURN VALUE
+-1 if the index is out of range
+The number of events if in range
+
+SIDE EFFECTS
+*/
+unsigned long pm_get_count(int index)
+{
+ unsigned long reg = 0;
+
+/*
+* Range check
+*/
+ if (index > pm_max_events)
+ return PM_V7_ERR;
+
+/*
+* Lock, select the index and read the count...unlock
+*/
+ PM_LOCK();
+ WCP15_PMSELR(index);
+ RCP15_PMXEVCNTR(reg);
+ PM_UNLOCK();
+ return reg;
+}
+
+/*
+FUNCTION pm_show_event_info
+
+DESCRIPTION Display (print) the information about the event at the index
+passed. Shows the index, name and count if a valid index is passed. If
+the index is not valid, then nothing is displayed.
+
+DEPENDENCIES
+
+RETURN VALUE
+None
+
+SIDE EFFECTS
+*/
+void pm_show_event_info(unsigned long index)
+{
+ unsigned long count;
+ unsigned long event_type;
+
+ if (index > pm_max_events)
+ return;
+ if (pm_triggers[index].index > pm_max_events)
+ return;
+
+ count = pm_get_count(index);
+ event_type = pm_triggers[index].event_type;
+
+ PRINT("Event %ld Trigger %s(%ld) count:%ld\n", index,
+ pm_find_event_name(event_type), event_type, count);
+}
+
+/*
+FUNCTION pm_event_init
+
+DESCRIPTION Given the struct pm_trigger_s info passed, configure the event.
+This can be a complex trigger or a simple trigger. Any old values in the
+event are lost.
+
+DEPENDENCIES
+
+RETURN VALUE
+status
+
+SIDE EFFECTS
+stops and clears the event at the index passed.
+*/
+int pm_event_init(struct pm_trigger_s *data)
+{
+ unsigned long trigger;
+ unsigned long actlr = 0;
+
+ if (0 == data)
+ return PM_V7_ERR;
+ if (data->index > pm_max_events)
+ return PM_V7_ERR;
+
+ /*
+	 * Set up the trigger based on the passed values
+ */
+ trigger = ((data->overflow_enable&1)<<31) |
+ ((data->event_export&1)<<30) |
+ ((data->stop_index&3)<<PM_STOP_SHIFT) |
+ ((data->reload_index&3)<<PM_RELOAD_SHIFT) |
+ ((data->resume_index&3)<<PM_RESUME_SHIFT) |
+ ((data->suspend_index&3)<<PM_SUSPEND_SHIFT) |
+ ((data->start_index&3)<<PM_START_SHIFT) |
+ ((data->overflow_stop&1)<<PM_STOPALL_SHIFT) |
+ ((data->stop_condition&7)<<PM_STOPCOND_SHIFT) |
+ ((data->reload_condition&7)<<PM_RELOADCOND_SHIFT) |
+ ((data->resume_condition&7)<<PM_RESUMECOND_SHIFT) |
+ ((data->suspend_condition&7)<<PM_SUSPENDCOND_SHIFT) |
+ ((data->start_condition&7)<<PM_STARTCOND_SHIFT);
+
+ /*
+ * Disable this counter while we are updating.
+ */
+ pm_disable_counter(data->index);
+
+ /*
+ * Lock, select the bank, set the trigger event and the event type
+ * then unlock.
+ */
+ PM_LOCK();
+ RCP15_PMACTLR(actlr);
+ actlr &= ~(3<<(data->index<<1));
+ WCP15_PMACTLR(actlr | ((data->overflow_action&3) << (data->index<<1)));
+ WCP15_PMSELR(data->index);
+ WCP15_PMXEVTYPER(data->event_type);
+ WCP15_PMXEVCNTCR(trigger);
+ PM_UNLOCK();
+
+ /*
+ * Make a copy of the trigger so we know what it is when/if it triggers.
+ */
+ memcpy(&pm_triggers[data->index], data, sizeof(*data));
+
+ /*
+ * We do not re-enable this here so events can be started together with
+	 * pm_group_start(); that way an accurate measurement can be taken...
+ */
+
+ return 0;
+}
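+
+/*
+ * Trigger setup sketch: count L1 data cache misses on counter 0 with
+ * the overflow enable bit set, then start it together with the cycle
+ * counter (fields not listed default to zero):
+ *
+ *	struct pm_trigger_s t = {
+ *		.index = 0,
+ *		.event_type = PM_EVT_L1_D_MISS,
+ *		.overflow_enable = true,
+ *		.overflow_action = PM_OVERFLOW_NOACTION,
+ *	};
+ *	pm_event_init(&t);
+ *	pm_group_start(PM_EV0_ENABLE | PM_COUNT_ENABLE);
+ */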
+
+int pm_set_event(int index, unsigned long event)
+{
+ unsigned long reg = 0;
+
+ /*
+ * Range check
+ */
+ if (index > pm_max_events)
+ return PM_V7_ERR;
+
+ /*
+ * Lock, select the index and read the count...unlock
+ */
+ PM_LOCK();
+ WCP15_PMSELR(index);
+ WCP15_PMXEVTYPER(event);
+ PM_UNLOCK();
+ return reg;
+}
+
+/*
+FUNCTION pm_set_local_iu
+
+DESCRIPTION Set the local IU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_iu(unsigned long value)
+{
+ WCP15_LPM0EVTYPER(value);
+}
+
+/*
+FUNCTION pm_set_local_xu
+
+DESCRIPTION Set the local XU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_xu(unsigned long value)
+{
+ WCP15_LPM1EVTYPER(value);
+}
+
+/*
+FUNCTION pm_set_local_su
+
+DESCRIPTION Set the local SU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_su(unsigned long value)
+{
+ WCP15_LPM2EVTYPER(value);
+}
+
+/*
+FUNCTION pm_set_local_l2
+
+DESCRIPTION Set the local L2 triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_l2(unsigned long value)
+{
+ WCP15_L2LPMEVTYPER(value);
+}
+
+/*
+FUNCTION pm_set_local_vu
+
+DESCRIPTION Set the local VU triggers. Note that the MSB determines if
+ these are enabled or not.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_set_local_vu(unsigned long value)
+{
+ WCP15_VLPMEVTYPER(value);
+}
+
+/*
+FUNCTION pm_isr
+
+DESCRIPTION:
+ Performance Monitor interrupt service routine to capture overflows
+
+DEPENDENCIES
+
+RETURN VALUE
+
+SIDE EFFECTS
+*/
+static irqreturn_t pm_isr(int irq, void *d)
+{
+ int i;
+
+ for (i = 0; i < PM_NUM_COUNTERS; i++) {
+ if (pm_get_overflow(i)) {
+ pm_overflow_count[i]++;
+ pm_reset_overflow(i);
+ }
+ }
+
+ if (pm_get_cycle_overflow()) {
+ pm_cycle_overflow_count++;
+ pm_reset_cycle_overflow();
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+void pm_stop_all(void)
+{
+ WCP15_PMCNTENCLR(0xFFFFFFFF);
+}
+
+void pm_reset_all(void)
+{
+ WCP15_PMCR(0xF);
+ WCP15_PMOVSR(PM_ALL_ENABLE); /* overflow clear */
+}
+
+void pm_start_all(void)
+{
+ WCP15_PMCNTENSET(PM_ALL_ENABLE);
+}
+
+/*
+FUNCTION pm_initialize
+
+DESCRIPTION Initialize the performance monitoring for the v7 processor.
+ Ensures the cycle count is running and the event counters are enabled.
+
+DEPENDENCIES
+
+RETURN VALUE
+ NONE
+
+SIDE EFFECTS
+*/
+void pm_initialize(void)
+{
+ unsigned long reg = 0;
+ unsigned char imp;
+ unsigned char id;
+ unsigned char num;
+ unsigned long enables = 0;
+ static int initialized;
+
+ if (initialized)
+ return;
+ initialized = 1;
+
+ irqid = INT_ARMQC_PERFMON;
+ RCP15_PMCR(reg);
+ imp = (reg>>24) & 0xFF;
+ id = (reg>>16) & 0xFF;
+ pm_max_events = num = (reg>>11) & 0xFF;
+	PRINT("V7 Performance Monitor Capabilities\n");
+ PRINT(" Implementor %c(%d)\n", imp, imp);
+ PRINT(" Id %d %x\n", id, id);
+ PRINT(" Num Events %d %x\n", num, num);
+ PRINT("\nCycle counter enabled by default...\n");
+
+ /*
+ * Global enable, ensure the global enable is set so all
+ * subsequent actions take effect. Also resets the counts
+ */
+ RCP15_PMCR(enables);
+ WCP15_PMCR(enables | PM_GLOBAL_ENABLE | PM_EVENT_RESET |
+ PM_CYCLE_RESET | PM_CLKDIV);
+
+ /*
+ * Enable access from user space
+ */
+ WCP15_PMUSERENR(1);
+ WCP15_PMACTLR(1);
+
+ /*
+	 * Install the interrupt handler and then enable the interrupts
+ */
+ pm_reset_cycle_overflow();
+ pm_reset_overflow(0);
+ pm_reset_overflow(1);
+ pm_reset_overflow(2);
+ pm_reset_overflow(3);
+
+ if (0 != request_irq(irqid, pm_isr, 0, "perfmon", 0))
+ printk(KERN_ERR "%s:%d request_irq returned error\n",
+ __FILE__, __LINE__);
+ WCP15_PMINTENSET(PM_ALL_ENABLE);
+ /*
+ * Enable the cycle counter. Default, count 1:1 no divisor.
+ */
+ pm_enable_cycle_counter();
+
+}
+
+void pm_free_irq(void)
+{
+ free_irq(irqid, 0);
+}
+
+void pm_deinitialize(void)
+{
+ unsigned long enables = 0;
+ RCP15_PMCR(enables);
+ WCP15_PMCR(enables & ~PM_GLOBAL_ENABLE);
+}
diff --git a/arch/arm/perfmon/perf.h b/arch/arm/perfmon/perf.h
new file mode 100644
index 0000000..1a9bb8b
--- /dev/null
+++ b/arch/arm/perfmon/perf.h
@@ -0,0 +1,86 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+perf.h
+
+DESCRIPTION: Reads and writes the performance monitoring registers in the ARM
+by using the MRC and MCR instructions.
+*/
+#ifndef PERF_H
+#define PERF_H
+extern unsigned long perf_get_cycles(void);
+extern void perf_set_count1(unsigned long val);
+extern void perf_set_count0(unsigned long val);
+extern unsigned long perf_get_count1(void);
+extern unsigned long perf_get_count0(void);
+extern unsigned long perf_get_ctrl(void);
+extern void perf_set_ctrl(void);
+extern void perf_set_ctrl_with(unsigned long v);
+extern void perf_enable_counting(void);
+extern void perf_disable_counting(void);
+extern void perf_set_divider(int d);
+extern unsigned long perf_get_overflow(void);
+extern void perf_clear_overflow(unsigned long bit);
+extern void perf_export_event(unsigned long bit);
+extern void perf_reset_counts(void);
+extern int perf_set_event(unsigned long index, unsigned long val);
+extern unsigned long perf_get_count(unsigned long index);
+extern void perf_set_cycles(unsigned long c);
+
+extern void pm_stop_all(void);
+extern void l2_pm_stop_all(void);
+extern void pm_start_all(void);
+extern void l2_pm_start_all(void);
+extern void pm_reset_all(void);
+extern void l2_pm_reset_all(void);
+extern int pm_set_event(int index, unsigned long event);
+extern int l2_pm_set_event(int index, unsigned long event);
+extern unsigned long pm_get_count(int index);
+extern unsigned long l2_pm_get_count(int index);
+extern unsigned long pm_get_cycle_count(void);
+extern unsigned long l2_pm_get_cycle_count(void);
+extern char *pm_find_event_name(unsigned long index);
+extern void pm_set_local_iu(unsigned long events);
+extern void pm_set_local_xu(unsigned long events);
+extern void pm_set_local_su(unsigned long events);
+extern void pm_set_local_l2(unsigned long events);
+extern void pm_set_local_vu(unsigned long events);
+extern void pm_set_local_bu(unsigned long events);
+extern void pm_set_local_cb(unsigned long events);
+extern void pm_set_local_mp(unsigned long events);
+extern void pm_set_local_sp(unsigned long events);
+extern void pm_set_local_scu(unsigned long events);
+extern void pm_initialize(void);
+extern void pm_deinitialize(void);
+extern void l2_pm_initialize(void);
+extern void l2_pm_deinitialize(void);
+extern void pm_free_irq(void);
+extern void l2_pm_free_irq(void);
+
+extern int per_process_perf_init(void);
+extern void per_process_perf_exit(void);
+int per_process_read(char *page, char **start, off_t off, int count,
+ int *eof, void *data);
+int per_process_write_hex(struct file *file, const char *buff,
+ unsigned long cnt, void *data);
+int per_process_read_decimal(char *page, char **start, off_t off, int count,
+ int *eof, void *data);
+int per_process_write_dec(struct file *file, const char *buff,
+ unsigned long cnt, void *data);
+void perfmon_register_callback(void);
+void _per_process_switch(unsigned long oldPid, unsigned long newPid);
+extern unsigned int pp_loaded;
+extern atomic_t pm_op_lock;
+#endif /*PERF_H*/