msm: ocmem: Add support for locking and unlocking regions
Add the common infrastructure required for locking and unlocking
OCMEM regions. This works both in secure processor (TZ) mode
and in non-secure mode.
In non-secure mode the OCMEM driver is responsible for setting up
the region modes, programming the graphics OCMEM MPU, and clearing
OCMEM regions using the DM clearing engine; a sketch of the
resulting call flow follows.
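For reference, a minimal sketch of the intended call flow, mirroring
the process_map()/process_unmap() changes below (error handling
trimmed; the req fields are as used in ocmem_sched.c):

    /* Secure the region backing a request before handing it out */
    unsigned long offset = phys_to_offset(req->req_start);

    rc = ocmem_lock(req->owner, offset, req->req_sz,
            get_mode(req->owner));
    if (rc < 0)
        do_unmap(req); /* region could not be secured; undo map */

    /* ... client uses the interval [offset, offset + req_sz) ... */

    /* On free: unlock restores the region mode and, in non-secure
     * mode, scrubs the contents via the DM clearing engine.
     */
    ocmem_unlock(req->owner, offset, req->req_sz);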
Signed-off-by: Naveen Ramaraj <nramaraj@codeaurora.org>
(cherry picked from commit e43d9bb7845ab199ff5242f185304f4e146eca8e)
Conflicts:
arch/arm/mach-msm/include/mach/ocmem_priv.h
arch/arm/mach-msm/ocmem_sched.c
Change-Id: I08e6b9788844d3cd583f166e54afd5edf38435dd
Signed-off-by: Sudhir Sharma <sudsha@codeaurora.org>
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index a5b0275..e963c88 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -66,9 +66,10 @@
/* Operational modes of each region */
enum region_mode {
- WIDE_MODE = 0x0,
+ MODE_NOT_SET = 0x0,
+ WIDE_MODE,
THIN_MODE,
- MODE_DEFAULT = WIDE_MODE,
+ MODE_DEFAULT = MODE_NOT_SET,
};
struct ocmem_plat_data {
@@ -208,6 +209,7 @@
int process_shrink(int, struct ocmem_handle *, unsigned long);
int ocmem_rdm_transfer(int, struct ocmem_map_list *,
unsigned long, int);
+int ocmem_clear(unsigned long, unsigned long);
unsigned long process_quota(int);
int ocmem_memory_off(int, unsigned long, unsigned long);
int ocmem_memory_on(int, unsigned long, unsigned long);
@@ -215,4 +217,8 @@
int ocmem_enable_iface_clock(void);
void ocmem_disable_core_clock(void);
void ocmem_disable_iface_clock(void);
+void ocmem_disable_br_clock(void);
+int ocmem_lock(enum ocmem_client, unsigned long, unsigned long,
+ enum region_mode);
+int ocmem_unlock(enum ocmem_client, unsigned long, unsigned long);
#endif
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
index c7cc57e..33e8ae8 100644
--- a/arch/arm/mach-msm/ocmem_core.c
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -38,6 +38,7 @@
unsigned psgsc_ctrl;
bool interleaved;
unsigned int mode;
+ atomic_t mode_counter;
unsigned int num_macros;
struct ocmem_hw_macro *macro;
struct msm_rpm_request *rpm_req;
@@ -56,10 +57,13 @@
#define OC_GEN_STATUS (0xC)
#define OC_PSGSC_STATUS (0x38)
#define OC_PSGSC_CTL (0x3C)
-#define OC_REGION_CTL (0x1000)
+#define OC_REGION_MODE_CTL (0x1000)
+#define OC_GFX_MPU_START (0x1004)
+#define OC_GFX_MPU_END (0x1008)
#define NUM_PORTS_MASK (0xF << 0)
#define NUM_PORTS_SHIFT (0)
+#define GFX_MPU_SHIFT (12)
#define NUM_MACROS_MASK (0xF << 8)
#define NUM_MACROS_SHIFT (8)
@@ -72,7 +76,7 @@
#define CORE_ON (0x2)
#define PERI_ON (0x1)
#define CLK_OFF (0x4)
-#define MACRO_ON (CORE_ON|PERI_ON)
+#define MACRO_ON (0x0)
#define MACRO_SLEEP_RETENTION (CLK_OFF|CORE_ON)
#define MACRO_SLEEP_RETENTION_PERI_ON (CLK_OFF|MACRO_ON)
#define MACRO_OFF (CLK_OFF)
@@ -165,6 +169,8 @@
else
rc = ocmem_write(new_state,
ocmem_base + PSCGC_CTL_n(region_num));
+ /* Barrier to commit the region state */
+ mb();
return 0;
}
@@ -324,6 +330,9 @@
/* In narrow mode each macro is allowed to be in a different state */
/* The region mode is simply the collection of all macro states */
for (i = 0; i < region->num_macros; i++) {
+ pr_debug("aggregated region state %x\n", r_state);
+ pr_debug("macro %d\n state %x\n", i,
+ region->macro[i].m_state);
r_state &= ~M_PSCGC_CTL_n(i);
r_state |= region->macro[i].m_state << (i * 4);
}
@@ -383,6 +392,198 @@
return 0;
}
+
+static int switch_region_mode(unsigned long offset, unsigned long len,
+ enum region_mode new_mode)
+{
+ unsigned region_start = num_regions;
+ unsigned region_end = num_regions;
+ int i = 0;
+
+ if (offset < 0)
+ return -EINVAL;
+
+ if (len < region_size)
+ return -EINVAL;
+
+ pr_debug("ocmem: mode_transistion to %x\n", new_mode);
+
+ region_start = offset / region_size;
+ region_end = (offset + len - 1) / region_size;
+
+ pr_debug("ocmem: region start %u end %u\n", region_start, region_end);
+
+ if (region_start >= num_regions ||
+ (region_end >= num_regions))
+ return -EINVAL;
+
+ for (i = region_start; i <= region_end; i++) {
+ struct ocmem_hw_region *region = &region_ctrl[i];
+ if (region->mode == MODE_DEFAULT) {
+ /* No prior mode programming on this region */
+ /* Set the region to its new mode */
+ region->mode = new_mode;
+ atomic_inc(&region->mode_counter);
+ pr_debug("Region (%d) switching to mode %d\n",
+ i, new_mode);
+ continue;
+ } else if (region->mode != new_mode) {
+ /* The region is currently set to a different mode */
+ if (new_mode == MODE_DEFAULT) {
+ if (atomic_dec_and_test(&region->mode_counter)) {
+ region->mode = MODE_DEFAULT;
+ pr_debug("Region (%d) restoring to default mode\n",
+ i);
+ } else {
+ /* More than 1 client in region */
+ /* Cannot move to default mode */
+ pr_debug("Region (%d) using current mode %d\n",
+ i, region->mode);
+ continue;
+ }
+ } else {
+ /* Do not switch modes */
+ pr_err("Region (%d) requested mode %x conflicts with current\n",
+ i, new_mode);
+ goto mode_switch_fail;
+ }
+ }
+ }
+ return 0;
+
+mode_switch_fail:
+ return -EINVAL;
+}
+
+#ifdef CONFIG_MSM_OCMEM_NONSECURE
+
+static int commit_region_modes(void)
+{
+ uint32_t region_mode_ctrl = 0x0;
+ unsigned pos = 0;
+ unsigned i = 0;
+
+ for (i = 0; i < num_regions; i++) {
+ struct ocmem_hw_region *region = &region_ctrl[i];
+ pos = i << 2;
+ if (region->mode == THIN_MODE)
+ region_mode_ctrl |= BIT(pos);
+ }
+ pr_debug("ocmem_region_mode_control %x\n", region_mode_ctrl);
+ ocmem_write(region_mode_ctrl, ocmem_base + OC_REGION_MODE_CTL);
+ /* Barrier to commit the region mode */
+ mb();
+ return 0;
+}
+
+static int ocmem_gfx_mpu_set(unsigned long offset, unsigned long len)
+{
+ int mpu_start = 0x0;
+ int mpu_end = 0x0;
+
+ if (offset)
+ mpu_start = (offset >> GFX_MPU_SHIFT) - 1;
+ if (mpu_start < 0)
+ /* Avoid underflow */
+ mpu_start = 0;
+ mpu_end = ((offset+len) >> GFX_MPU_SHIFT) - 1;
+ BUG_ON(mpu_end < 0);
+
+ pr_debug("ocmem: mpu: start %x end %x\n", mpu_start, mpu_end);
+ ocmem_write(mpu_start << GFX_MPU_SHIFT, ocmem_base + OC_GFX_MPU_START);
+ ocmem_write(mpu_end << GFX_MPU_SHIFT, ocmem_base + OC_GFX_MPU_END);
+ return 0;
+}
+
+static void ocmem_gfx_mpu_remove(void)
+{
+ ocmem_write(0x0, ocmem_base + OC_GFX_MPU_START);
+ ocmem_write(0x0, ocmem_base + OC_GFX_MPU_END);
+}
+
+static int do_lock(enum ocmem_client id, unsigned long offset,
+ unsigned long len, enum region_mode mode)
+{
+ return 0;
+}
+
+static int do_unlock(enum ocmem_client id, unsigned long offset,
+ unsigned long len)
+{
+ ocmem_clear(offset, len);
+ return 0;
+}
+#else
+static int ocmem_gfx_mpu_set(unsigned long offset, unsigned long len)
+{
+ return 0;
+}
+
+static void ocmem_gfx_mpu_remove(void)
+{
+}
+
+static int commit_region_modes(void)
+{
+ return 0;
+}
+
+static int do_lock(enum ocmem_client id, unsigned long offset,
+ unsigned long len, enum region_mode mode)
+{
+ return 0;
+}
+
+static int do_unlock(enum ocmem_client id, unsigned long offset,
+ unsigned long len)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_OCMEM_NONSECURE */
+
+int ocmem_lock(enum ocmem_client id, unsigned long offset, unsigned long len,
+ enum region_mode mode)
+{
+ if (len < OCMEM_MIN_ALLOC) {
+ pr_err("ocmem: Invalid len %lx for lock\n", len);
+ return -EINVAL;
+ }
+
+ if (id == OCMEM_GRAPHICS)
+ ocmem_gfx_mpu_set(offset, len);
+
+ mutex_lock(&region_ctrl_lock);
+
+ if (switch_region_mode(offset, len, mode) < 0)
+ goto switch_region_fail;
+
+ commit_region_modes();
+
+ do_lock(id, offset, len, mode);
+
+ mutex_unlock(&region_ctrl_lock);
+ return 0;
+
+switch_region_fail:
+ mutex_unlock(&region_ctrl_lock);
+ return -EINVAL;
+}
+
+int ocmem_unlock(enum ocmem_client id, unsigned long offset, unsigned long len)
+{
+ if (id == OCMEM_GRAPHICS)
+ ocmem_gfx_mpu_remove();
+
+ mutex_lock(&region_ctrl_lock);
+ do_unlock(id, offset, len);
+ switch_region_mode(offset, len, MODE_DEFAULT);
+ commit_region_modes();
+ mutex_unlock(&region_ctrl_lock);
+ return 0;
+}
+
#if defined(CONFIG_MSM_OCMEM_POWER_DISABLE)
static int ocmem_core_set_default_state(void)
{
@@ -732,6 +933,7 @@
struct msm_rpm_request *req = NULL;
region->interleaved = interleaved;
region->mode = MODE_DEFAULT;
+ atomic_set(&region->mode_counter, 0);
region->r_state = REGION_DEFAULT_OFF;
region->num_macros = num_banks;
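To illustrate the reference counting in switch_region_mode() above, a
hypothetical sequence on a single region (OCMEM_VIDEO stands in for
any second client id from enum ocmem_client; it is not part of this
patch):

    ocmem_lock(OCMEM_GRAPHICS, 0, region_size, WIDE_MODE);
    /* region->mode: MODE_DEFAULT -> WIDE_MODE, mode_counter -> 1 */

    ocmem_lock(OCMEM_VIDEO, 0, region_size, THIN_MODE);
    /* conflicts with WIDE_MODE: switch_region_mode() fails and the
     * lock returns -EINVAL without touching OC_REGION_MODE_CTL
     */

    ocmem_unlock(OCMEM_GRAPHICS, 0, region_size);
    /* mode_counter drops to 0: the region reverts to MODE_DEFAULT
     * and the mode bits are re-committed to OC_REGION_MODE_CTL
     */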
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index ccbef9b..85dc85d 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -38,8 +38,12 @@
#define DM_INTR_CLR (0x8)
#define DM_INTR_MASK (0xC)
-#define DM_GEN_STATUS (0x10)
-#define DM_STATUS (0x14)
+#define DM_INT_STATUS (0x10)
+#define DM_GEN_STATUS (0x14)
+#define DM_CLR_OFFSET (0x18)
+#define DM_CLR_SIZE (0x1C)
+#define DM_CLR_PATTERN (0x20)
+#define DM_CLR_TRIGGER (0x24)
#define DM_CTRL (0x1000)
#define DM_TBL_BASE (0x1010)
#define DM_TBL_IDX(x) ((x) * 0x18)
@@ -82,8 +86,9 @@
#define DM_DIR_SHIFT 0x0
#define DM_DONE 0x1
-#define DM_INTR_ENABLE 0x0
-#define DM_INTR_DISABLE 0x1
+#define DM_MASK_RESET 0x0
+#define DM_INTR_RESET 0x20003
+#define DM_CLR_ENABLE 0x1
static void *br_base;
static void *dm_base;
@@ -122,12 +127,59 @@
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
+ unsigned status;
+ unsigned irq_status;
+ status = ocmem_read(dm_base + DM_GEN_STATUS);
+ irq_status = ocmem_read(dm_base + DM_INT_STATUS);
+ pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status);
+ if (irq_status & BIT(0)) {
+ pr_debug("Data mover completed\n");
+ irq_status &= ~BIT(0);
+ ocmem_write(irq_status, dm_base + DM_INTR_CLR);
+ } else if (irq_status & BIT(1)) {
+ pr_debug("Data clear engine completed\n");
+ irq_status &= ~BIT(1);
+ ocmem_write(irq_status, dm_base + DM_INTR_CLR);
+ } else {
+ BUG_ON(1);
+ }
atomic_set(&dm_pending, 0);
- ocmem_write(DM_INTR_DISABLE, dm_base + DM_INTR_CLR);
wake_up_interruptible(&dm_wq);
return IRQ_HANDLED;
}
+#ifdef CONFIG_MSM_OCMEM_NONSECURE
+int ocmem_clear(unsigned long start, unsigned long size)
+{
+ atomic_set(&dm_pending, 1);
+ /* Clear DM Mask */
+ ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
+ /* Clear DM Interrupts */
+ ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
+ /* DM CLR offset */
+ ocmem_write(start, dm_base + DM_CLR_OFFSET);
+ /* DM CLR size */
+ ocmem_write(size, dm_base + DM_CLR_SIZE);
+ /* Wipe out memory as "OCMM" */
+ ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
+ /* The offset, size and pattern for clearing must be set
+ * before triggering the clearing engine
+ */
+ mb();
+ /* Trigger Data Clear */
+ ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
+
+ wait_event_interruptible(dm_wq,
+ atomic_read(&dm_pending) == 0);
+ return 0;
+}
+#else
+int ocmem_clear(unsigned long start, unsigned long size)
+{
+ return 0;
+}
+#endif
+
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
unsigned long start, int direction)
@@ -195,9 +247,13 @@
dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
dm_ctrl |= (direction << DM_DIR_SHIFT);
- status = ocmem_read(dm_base + DM_STATUS);
+ status = ocmem_read(dm_base + DM_GEN_STATUS);
pr_debug("Transfer status before %x\n", status);
atomic_set(&dm_pending, 1);
+ /* The DM and BR tables must be programmed before triggering the
+ * Data Mover, else the coherent transfer would be corrupted
+ */
+ mb();
/* Trigger DM */
ocmem_write(dm_ctrl, dm_base + DM_CTRL);
pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);
@@ -236,8 +292,10 @@
}
init_waitqueue_head(&dm_wq);
+ /* Clear DM Mask */
+ ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
/* enable dm interrupts */
- ocmem_write(DM_INTR_ENABLE, dm_base + DM_INTR_MASK);
+ ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
ocmem_disable_core_clock();
return 0;
}
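The clear engine reuses the dm_pending/dm_wq handshake already used by
ocmem_rdm_transfer(): arm the flag, trigger the hardware, and sleep
until the interrupt handler acknowledges completion. Condensed (a
sketch of the pattern as written above, not new code):

    atomic_set(&dm_pending, 1); /* arm completion flag */
    ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
    /* ocmem_dm_irq_handler() acks the clear-done bit (BIT(1) of
     * DM_INT_STATUS) via DM_INTR_CLR, sets dm_pending back to 0 and
     * calls wake_up_interruptible(&dm_wq)
     */
    wait_event_interruptible(dm_wq, atomic_read(&dm_pending) == 0);

Note that wait_event_interruptible() can return early on a signal; as
written, ocmem_clear() does not check its return value.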
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index 3ac8e0a..2d968aa 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -158,6 +158,59 @@
return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}
+inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
+{
+ if (handle)
+ return &handle->buffer;
+ else
+ return NULL;
+}
+
+inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
+{
+ if (buffer)
+ return container_of(buffer, struct ocmem_handle, buffer);
+ else
+ return NULL;
+}
+
+inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
+{
+ if (handle)
+ return handle->req;
+ else
+ return NULL;
+}
+
+inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
+{
+ if (req && req->buffer)
+ return container_of(req->buffer, struct ocmem_handle, buffer);
+ else
+ return NULL;
+}
+
+/* Simple wrappers which will have debug features added later */
+inline int ocmem_read(void *at)
+{
+ return readl_relaxed(at);
+}
+
+inline int ocmem_write(unsigned long val, void *at)
+{
+ writel_relaxed(val, at);
+ return 0;
+}
+
+inline int get_mode(int id)
+{
+ if (!check_id(id))
+ return MODE_NOT_SET;
+ else
+ return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
+ WIDE_MODE : THIN_MODE;
+}
+
/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
@@ -524,10 +577,33 @@
rc = ocmem_enable_iface_clock();
if (rc < 0)
+ goto iface_clock_fail;
+
+ rc = ocmem_enable_br_clock();
+
+ if (rc < 0)
+ goto br_clock_fail;
+
+ rc = do_map(req);
+
+ if (rc < 0) {
+ pr_err("ocmem: Failed to map request %p for %d\n",
+ req, req->owner);
goto process_map_fail;
- return do_map(req);
+ }
+ if (ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
+ get_mode(req->owner))) {
+ pr_err("ocmem: Failed to secure request %p for %d\n", req,
+ req->owner);
+ rc = -EINVAL;
+ goto lock_failed;
+ }
+
+ return 0;
+lock_failed:
+ do_unmap(req);
process_map_fail:
ocmem_disable_core_clock();
core_clock_fail:
@@ -540,6 +616,14 @@
{
int rc = 0;
+ if (ocmem_unlock(req->owner, phys_to_offset(req->req_start),
+ req->req_sz)) {
+ pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
+ req->owner);
+ rc = -EINVAL;
+ goto unlock_failed;
+ }
+
rc = do_unmap(req);
if (rc < 0)
@@ -547,9 +631,9 @@
ocmem_disable_iface_clock();
ocmem_disable_core_clock();
-
return 0;
+unlock_failed:
process_unmap_fail:
pr_err("ocmem: Failed to unmap ocmem request\n");
return rc;
@@ -1254,7 +1338,6 @@
return -EINVAL;
}
-
if (req->req_sz != 0) {
offset = phys_to_offset(req->req_start);
@@ -1269,7 +1352,6 @@
}
rc = do_free(req);
-
if (rc < 0)
return -EINVAL;