/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* Default mapping in classifier to work with default
 * queue setup.
 */
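/*
 * The index is the 802.1d user priority (0-7); the value is the
 * access category used with the default queue setup, where queue
 * 0 = AC_VO, 1 = AC_VI, 2 = AC_BE and 3 = AC_BK.
 */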
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

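/* 802.2 LLC/SNAP header (DSAP/SSAP 0xAA, UI control, zero OUI)
 * followed by the IPv4 ethertype 0x0800.
 */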
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame determine the 802.1p/1d tag to use.  */
static unsigned int classify_1d(struct sk_buff *skb)
{
	unsigned int dscp;

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		dscp = ip_hdr(skb)->tos & 0xfc;
		break;

	default:
		return 0;
	}

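	/* Map the DSCP to an 802.1d priority using its top three bits,
	 * i.e. the former IP precedence field.
	 */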
	return dscp >> 5;
}


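/* Downgrade a frame's 802.1d priority to the next lower access
 * category (VO -> VI -> BE -> BK); returns -1 once no lower AC is
 * available. Used when the AP requires admission control (ACM) for
 * the AC the frame would otherwise go to.
 */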
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* Indicate which queue to use.  */
static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* The old code would drop the packet in this
			 * case.
			 */
			return 0;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}

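/*
 * Select the TX queue for a frame on the master device: classify it
 * into an access category, fill in the QoS control field of QoS data
 * frames and, if an aggregation queue is active for this station/TID,
 * redirect the frame to that queue.
 */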
u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
	struct ieee80211_local *local = mpriv->local;
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta;
	u16 queue;
	u8 tid;

	queue = classify80211(local, skb);
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	if (skb->requeue) {
		if (!hw->ampdu_queues)
			return queue;

		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];

			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool))
				queue = ampdu_queue;
		}
		rcu_read_unlock();

		return queue;
	}

	/* Now we know the 1d priority, fill in the QoS header if
	 * there is one.
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		if (!hw->ampdu_queues)
			return queue;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];

			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool))
				queue = ampdu_queue;
		}

		rcu_read_unlock();
	}

	return queue;
}

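/*
 * Reserve a software aggregation queue from the pool for the given
 * station/TID (currently disabled, see the XXX comment below).
 */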
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* XXX: currently broken due to cb/requeue use */
	return -EPERM;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* If there are already pending packets on
			 * this tid, we first need to drain them on
			 * the previous queue, since HT requires
			 * strict ordering. */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %pM pool=0x%lX\n",
					i, tid, sta->sta.addr,
					local->queue_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/**
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	int agg_queue = sta->tid_to_tx_q[tid];
	struct ieee80211_hw *hw = &local->hw;

	/* return the qdisc to the pool */
	clear_bit(agg_queue, local->queue_pool);
	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

	if (requeue) {
		ieee80211_requeue(local, agg_queue);
	} else {
		struct netdev_queue *txq;
		spinlock_t *root_lock;
		struct Qdisc *q;

		txq = netdev_get_tx_queue(local->mdev, agg_queue);
		q = rcu_dereference(txq->qdisc);
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);
		qdisc_reset(q);
		spin_unlock_bh(root_lock);
	}
}

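/*
 * Drain all frames queued on the qdisc of the given queue, re-run
 * queue selection for each of them and enqueue them on the queue
 * they now map to.
 */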
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();

	qdisc = rcu_dereference(txq->qdisc);
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	skb_queue_head_init(&list);

	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);

		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}