iwmc3200wifi: protect rx_tickets and rx_packets[] lists

Protect the rx_tickets and rx_packets[] lists with spinlocks to fix a
race condition in concurrent list operations. In iwmc3200wifi, both the
sdio_isr_worker and rx_worker work items can access the rx ticket and
packet lists at the same time under high rx load.

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
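
The diff below only touches rx.c. It assumes struct iwm_priv gains one
spinlock per protected list and that the locks are initialized before
the rx path can run, roughly along the lines of the sketch below (the
field names match the accessors used in the hunks, but the exact
placement and init site are assumptions, not shown in this diff). Plain
spin_lock() is enough here because both contenders, sdio_isr_worker and
rx_worker, run in workqueue (process) context.

	/* in struct iwm_priv, next to the lists they protect */
	struct list_head rx_tickets;
	spinlock_t ticket_lock;
	struct list_head rx_packets[IWM_RX_ID_HASH];
	spinlock_t packet_lock[IWM_RX_ID_HASH];

	/* once during priv setup, before any rx work is queued */
	spin_lock_init(&iwm->ticket_lock);
	for (i = 0; i < IWM_RX_ID_HASH; i++)
		spin_lock_init(&iwm->packet_lock[i]);
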
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 38d950b..c8a31be 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -344,10 +344,15 @@
 	u8 id_hash = IWM_RX_ID_GET_HASH(id);
 	struct iwm_rx_packet *packet;
 
+	spin_lock(&iwm->packet_lock[id_hash]);
 	list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
-		if (packet->id == id)
+		if (packet->id == id) {
+			list_del(&packet->node);
+			spin_unlock(&iwm->packet_lock[id_hash]);
 			return packet;
+		}
 
+	spin_unlock(&iwm->packet_lock[id_hash]);
 	return NULL;
 }
 
@@ -385,18 +390,22 @@
 	struct iwm_rx_packet *packet, *np;
 	int i;
 
+	spin_lock(&iwm->ticket_lock);
 	list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
 		list_del(&ticket->node);
 		iwm_rx_ticket_node_free(ticket);
 	}
+	spin_unlock(&iwm->ticket_lock);
 
 	for (i = 0; i < IWM_RX_ID_HASH; i++) {
+		spin_lock(&iwm->packet_lock[i]);
 		list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
 					 node) {
 			list_del(&packet->node);
 			kfree_skb(packet->skb);
 			kfree(packet);
 		}
+		spin_unlock(&iwm->packet_lock[i]);
 	}
 }
 
@@ -424,7 +433,9 @@
 				   ticket->action ==  IWM_RX_TICKET_RELEASE ?
 				   "RELEASE" : "DROP",
 				   ticket->id);
+			spin_lock(&iwm->ticket_lock);
 			list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+			spin_unlock(&iwm->ticket_lock);
 
 			/*
 			 * We received an Rx ticket, most likely there's
@@ -457,6 +468,7 @@
 	struct iwm_rx_packet *packet;
 	u16 id, buf_offset;
 	u32 packet_size;
+	u8 id_hash;
 
 	IWM_DBG_RX(iwm, DBG, "\n");
 
@@ -474,7 +486,10 @@
 	if (IS_ERR(packet))
 		return PTR_ERR(packet);
 
-	list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+	id_hash = IWM_RX_ID_GET_HASH(id);
+	spin_lock(&iwm->packet_lock[id_hash]);
+	list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
+	spin_unlock(&iwm->packet_lock[id_hash]);
 
 	/* We might (unlikely) have received the packet _after_ the ticket */
 	queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -1664,6 +1679,7 @@
 	 * We stop whenever a ticket is missing its packet, as we're
 	 * supposed to send the packets in order.
 	 */
+	spin_lock(&iwm->ticket_lock);
 	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
 		struct iwm_rx_packet *packet =
 			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1672,12 +1688,12 @@
 			IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
 				   "to be handled first\n",
 				   le16_to_cpu(ticket->ticket->id));
-			return;
+			break;
 		}
 
 		list_del(&ticket->node);
-		list_del(&packet->node);
 		iwm_rx_process_packet(iwm, packet, ticket);
 	}
+	spin_unlock(&iwm->ticket_lock);
 }
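
A note on the last two hunks: iwm_rx_packet_get() now unlinks the
matching packet from its rx_packets[] hash list under packet_lock[]
before returning it, which is why the caller drops its own
list_del(&packet->node), and the early return becomes a break so the
function always reaches the final spin_unlock(). The resulting
rx_worker loop looks roughly like the sketch below (reconstructed from
the hunks above; the !packet check is taken from surrounding context
that this diff does not show):

	spin_lock(&iwm->ticket_lock);
	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
		struct iwm_rx_packet *packet =
			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));

		if (!packet) {
			/* Packets must go up in ticket order, so stop
			 * here, but break to the unlock below instead of
			 * returning with ticket_lock held.
			 */
			break;
		}

		/* The packet was already removed from rx_packets[] by
		 * iwm_rx_packet_get(), under packet_lock[id_hash], so
		 * only the ticket node is deleted here.
		 */
		list_del(&ticket->node);
		iwm_rx_process_packet(iwm, packet, ticket);
	}
	spin_unlock(&iwm->ticket_lock);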