msm: bam_dmux: modify DL low memory algorithm
bam_dmux maintains a pool of dynamically allocated buffers for the BAM DMA
engine to use as destination memory for Downlink (DL) data. The buffers
are normally allocated from various hot paths inside the driver, so the
allocation requests use flags indicating that delays are not tolerable and
that either the memory or an error must be returned to bam_dmux
immediately. If a low memory allocation failure is encountered while
replenishing the pool of buffers, bam_dmux will either retry the
allocations when the next scheduled pool refill occurs, provided at least
one buffer remains in the pool, or schedule a delayed task that retries
the allocation until at least one buffer can be put into the pool.
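
For reference (illustration only, not part of the patch), the hot-path
allocation behaviour described above boils down to roughly the sketch
below; BUFFER_SIZE and alloc_dl_buffer_hot are stand-ins for the driver's
actual buffer size and allocation site:

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    #define BUFFER_SIZE 2048	/* stand-in for the driver's DL buffer size */

    static struct sk_buff *alloc_dl_buffer_hot(void)
    {
            /*
             * GFP_NOWAIT tells the allocator not to sleep, so under memory
             * pressure the call fails and returns NULL instead of waiting
             * for reclaim, and __GFP_NOWARN suppresses the usual
             * allocation-failure warning.  The caller must check for NULL
             * and arrange its own retry.
             */
            return __dev_alloc_skb(BUFFER_SIZE, GFP_NOWAIT | __GFP_NOWARN);
    }
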
This mechanism for handling low memory situations has two flaws. First,
the allocation type used by bam_dmux does not trigger the standard low
memory recovery mechanisms within the memory management subsystem.
Second, the peripheral on the other side of the DMA engine has a limited
hardware-based flow control mechanism that will not release flow control
with only one buffer queued to the DMA engine.
To assist in system recovery and avoid a data stall, create a cold path
that is tolerant of memory allocation delays and is triggered when the hot
path encounters an allocation failure.
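
In outline, the resulting split looks like the sketch below (simplified,
with illustrative names standing in for the driver's __queue_rx, queue_rx
and queue_rx_work; the real implementation is in the diff that follows):

    #include <linux/gfp.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static void refill_pool(gfp_t flags);

    /* Cold path: runs in process context, so blocking flags are safe. */
    static void refill_work_func(struct work_struct *work)
    {
            refill_pool(GFP_KERNEL);  /* may sleep and enter direct reclaim */
    }
    static DECLARE_WORK(refill_work, refill_work_func);

    static void refill_pool(gfp_t flags)
    {
            void *buf = kmalloc(2048, flags);

            if (!buf) {
                    /* Hot path failed; defer the refill to the cold path. */
                    schedule_work(&refill_work);
                    return;
            }
            /* ... queue buf to the DMA engine ... */
    }

    /* Hot path: callers cannot tolerate allocator delays or warnings. */
    static void refill_pool_hot(void)
    {
            refill_pool(GFP_NOWAIT | __GFP_NOWARN);
    }
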
CRs-Fixed: 589665
Change-Id: If72b0eaef285ac6dfd99e97bef1cd7df154e3a48
Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 991ccef..74bbdbe 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -215,9 +215,10 @@
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);
+static void queue_rx_work_func(struct work_struct *work);
static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
-static struct delayed_work queue_rx_work;
+static DECLARE_WORK(queue_rx_work, queue_rx_work_func);
static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;
@@ -384,7 +385,7 @@
spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}
-static void queue_rx(void)
+static void __queue_rx(gfp_t alloc_flags)
{
void *ptr;
struct rx_pkt_info *info;
@@ -399,23 +400,23 @@
if (in_global_reset)
goto fail;
- info = kmalloc(sizeof(struct rx_pkt_info),
- GFP_NOWAIT | __GFP_NOWARN);
+ info = kmalloc(sizeof(struct rx_pkt_info), alloc_flags);
if (!info) {
DMUX_LOG_KERR(
- "%s: unable to alloc rx_pkt_info, will retry later\n",
- __func__);
+ "%s: unable to alloc rx_pkt_info w/ flags %x, will retry later\n",
+ __func__,
+ alloc_flags);
goto fail;
}
INIT_WORK(&info->work, handle_bam_mux_cmd);
- info->skb = __dev_alloc_skb(BUFFER_SIZE,
- GFP_NOWAIT | __GFP_NOWARN);
+ info->skb = __dev_alloc_skb(BUFFER_SIZE, alloc_flags);
if (info->skb == NULL) {
DMUX_LOG_KERR(
- "%s: unable to alloc skb, will retry later\n",
- __func__);
+ "%s: unable to alloc skb w/ flags %x, will retry later\n",
+ __func__,
+ alloc_flags);
goto fail_info;
}
ptr = skb_put(info->skb, BUFFER_SIZE);
@@ -457,15 +458,30 @@
kfree(info);
fail:
- if (rx_len_cached == 0 && !in_global_reset) {
+ if (!in_global_reset) {
DMUX_LOG_KERR("%s: rescheduling\n", __func__);
- schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
+ schedule_work(&queue_rx_work);
}
}
+static void queue_rx(void)
+{
+ /*
+ * Hot path. Delays from waiting for the allocator to find memory that
+ * is not immediately available, and delays from logging allocation
+ * failures, cannot be tolerated at this time.
+ */
+ __queue_rx(GFP_NOWAIT | __GFP_NOWARN);
+}
+
static void queue_rx_work_func(struct work_struct *work)
{
- queue_rx();
+ /*
+ * Cold path. Delays can be tolerated. Use of GFP_KERNEL should
+ * guarantee the requested memory will be found, after some amount of
+ * delay.
+ */
+ __queue_rx(GFP_KERNEL);
}
static void bam_mux_process_data(struct sk_buff *rx_skb)
@@ -2477,7 +2493,6 @@
init_completion(&shutdown_completion);
complete_all(&shutdown_completion);
INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
- INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
init_srcu_struct(&bam_dmux_srcu);