msm: bam_dmux: pin rx workqueue to core 0
Pin the receive workqueue so that it always runs on core 0. Do this to
eliminate a race condition caused by the workqueue migrating between
cores in the middle of a downlink data transfer. When hit, the race
causes the TCP/IP stack to process packets out of order for a short
time, which can degrade performance due to unnecessary retransmissions.
CRs-Fixed: 346788
Change-Id: If1588cc5fd24f6d4af441a1728cfbb801a7d7c5b
Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
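For reference, here is a minimal sketch of the pinning pattern this patch
relies on, written as a standalone module (demo_wq, demo_rx_pass and
demo_work are hypothetical names, not part of bam_dmux): queue_work()
lets the scheduler place a work item on whichever core is convenient, so
successive rx passes can land on different cores and race, while
queue_work_on(0, ...) forces every pass onto core 0, where they execute
in submission order.

    #include <linux/module.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    /* Stand-in for rx_timer_work_func(): a long-running rx polling pass. */
    static void demo_rx_pass(struct work_struct *work)
    {
    	/* Pinned via queue_work_on(0, ...), so this always prints 0. */
    	pr_info("rx pass on cpu %d\n", raw_smp_processor_id());
    }
    static DECLARE_WORK(demo_work, demo_rx_pass);

    static int __init demo_init(void)
    {
    	/*
    	 * A bound (per-cpu) workqueue: queue_work_on() only honors its
    	 * cpu argument for bound queues.  WQ_CPU_INTENSIVE keeps the
    	 * long-running pass from stalling other per-cpu work items.
    	 */
    	demo_wq = alloc_workqueue("demo_rx",
    				  WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
    	if (!demo_wq)
    		return -ENOMEM;

    	queue_work_on(0, demo_wq, &demo_work);	/* always core 0 */
    	return 0;
    }

    static void __exit demo_exit(void)
    {
    	destroy_workqueue(demo_wq);
    }
    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");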
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 66d2a57..1572b82 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -1074,7 +1074,7 @@
fail:
pr_err("%s: reverting to polling\n", __func__);
- queue_work(bam_mux_rx_workqueue, &rx_timer_work);
+ queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}
static void rx_timer_work_func(struct work_struct *work)
@@ -1179,7 +1179,11 @@
}
grab_wakelock();
polling_mode = 1;
- queue_work(bam_mux_rx_workqueue, &rx_timer_work);
+ /*
+ * Run on core 0 so that netif_rx() in rmnet uses only
+ * one queue.
+ */
+ queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}
break;
default:
@@ -2172,7 +2176,13 @@
if (rc)
pr_err("%s: unable to set dfab clock rate\n", __func__);
- bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
+ /*
+ * Set up the workqueue so that its work can be pinned to core 0
+ * (so that netif_rx() in rmnet only uses one queue) without
+ * blocking the watchdog pet function.
+ */
+ bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
if (!bam_mux_rx_workqueue)
return -ENOMEM;
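For readers unfamiliar with the workqueue API, the reason for the swap
above, as I read the flags: on kernels of this era,
create_singlethread_workqueue() expands to alloc_workqueue() with
WQ_UNBOUND, and queue_work_on() only honors its cpu argument for bound
(per-cpu) queues, so the rx workqueue has to be recreated as a bound
queue before the pinning can take effect. A max_active of 1 preserves
the old single-threaded ordering, WQ_MEM_RECLAIM keeps a rescuer thread
so rx can make progress under memory pressure, and WQ_CPU_INTENSIVE
exempts the long-running polling pass from per-cpu concurrency
accounting so it does not hold up other work items on core 0, such as
the watchdog pet work mentioned in the comment.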