| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (c) 2008 Atheros Communications Inc. | 
|  | 3 | * | 
|  | 4 | * Permission to use, copy, modify, and/or distribute this software for any | 
|  | 5 | * purpose with or without fee is hereby granted, provided that the above | 
|  | 6 | * copyright notice and this permission notice appear in all copies. | 
|  | 7 | * | 
|  | 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | 
|  | 9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | 
|  | 10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | 
|  | 11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | 
|  | 12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | 
|  | 13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | 
|  | 14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | 
|  | 15 | */ | 
|  | 16 |  | 
|  | 17 | /* | 
|  | 18 | * Implementation of transmit path. | 
|  | 19 | */ | 
|  | 20 |  | 
|  | 21 | #include "core.h" | 
|  | 22 |  | 
|  | 23 | #define BITS_PER_BYTE           8 | 
|  | 24 | #define OFDM_PLCP_BITS          22 | 
|  | 25 | #define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f) | 
|  | 26 | #define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1) | 
|  | 27 | #define L_STF                   8 | 
|  | 28 | #define L_LTF                   8 | 
|  | 29 | #define L_SIG                   4 | 
|  | 30 | #define HT_SIG                  8 | 
|  | 31 | #define HT_STF                  4 | 
|  | 32 | #define HT_LTF(_ns)             (4 * (_ns)) | 
|  | 33 | #define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */ | 
|  | 34 | #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */ | 
|  | 35 | #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) | 
|  | 36 | #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) | 
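/*
 * Illustrative note (not in the original source): with the full 800 ns
 * guard interval each OFDM symbol lasts 4 us, so SYMBOL_TIME(10) = 40 us.
 * With the 400 ns half GI a symbol lasts 3.6 us, and the fixed-point form
 * above gives SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us.
 */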
|  | 37 |  | 
|  | 38 | #define OFDM_SIFS_TIME    	    16 | 
|  | 39 |  | 
|  | 40 | static u32 bits_per_symbol[][2] = { | 
|  | 41 | /* 20MHz 40MHz */ | 
|  | 42 | {    26,   54 },     /*  0: BPSK */ | 
|  | 43 | {    52,  108 },     /*  1: QPSK 1/2 */ | 
|  | 44 | {    78,  162 },     /*  2: QPSK 3/4 */ | 
|  | 45 | {   104,  216 },     /*  3: 16-QAM 1/2 */ | 
|  | 46 | {   156,  324 },     /*  4: 16-QAM 3/4 */ | 
|  | 47 | {   208,  432 },     /*  5: 64-QAM 2/3 */ | 
|  | 48 | {   234,  486 },     /*  6: 64-QAM 3/4 */ | 
|  | 49 | {   260,  540 },     /*  7: 64-QAM 5/6 */ | 
|  | 50 | {    52,  108 },     /*  8: BPSK */ | 
|  | 51 | {   104,  216 },     /*  9: QPSK 1/2 */ | 
|  | 52 | {   156,  324 },     /* 10: QPSK 3/4 */ | 
|  | 53 | {   208,  432 },     /* 11: 16-QAM 1/2 */ | 
|  | 54 | {   312,  648 },     /* 12: 16-QAM 3/4 */ | 
|  | 55 | {   416,  864 },     /* 13: 64-QAM 2/3 */ | 
|  | 56 | {   468,  972 },     /* 14: 64-QAM 3/4 */ | 
|  | 57 | {   520, 1080 },     /* 15: 64-QAM 5/6 */ | 
|  | 58 | }; | 
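/*
 * Minimal sketch (not part of the original driver): how the table above
 * is indexed for an HT rate code.  HT_RC_2_MCS() extracts the MCS index
 * and the second dimension selects 20 vs 40 MHz; the helper name and its
 * use of OFDM_PLCP_BITS are assumptions for illustration only.
 */
static inline u32 example_ht_data_symbols(u32 pktlen, u8 rc, int width)
{
	u32 nbits = (pktlen << 3) + OFDM_PLCP_BITS;	/* payload bits + PLCP overhead */
	u32 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];

	return (nbits + nsymbits - 1) / nsymbits;	/* round up to whole symbols */
}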
|  | 59 |  | 
|  | 60 | #define IS_HT_RATE(_rate)     ((_rate) & 0x80) | 
|  | 61 |  | 
|  | 62 | /* | 
|  | 63 | * Insert a chain of ath_buf (descriptors) on a multicast txq | 
|  | 64 | * but do NOT start tx DMA on this queue. | 
|  | 65 | * NB: must be called with txq lock held | 
|  | 66 | */ | 
|  | 67 |  | 
|  | 68 | static void ath_tx_mcastqaddbuf(struct ath_softc *sc, | 
|  | 69 | struct ath_txq *txq, | 
|  | 70 | struct list_head *head) | 
|  | 71 | { | 
|  | 72 | struct ath_hal *ah = sc->sc_ah; | 
|  | 73 | struct ath_buf *bf; | 
|  | 74 |  | 
|  | 75 | if (list_empty(head)) | 
|  | 76 | return; | 
|  | 77 |  | 
|  | 78 | /* | 
|  | 79 | * Insert the frame on the outbound list and | 
|  | 80 | * pass it on to the hardware. | 
|  | 81 | */ | 
|  | 82 | bf = list_first_entry(head, struct ath_buf, list); | 
|  | 83 |  | 
|  | 84 | /* | 
|  | 85 | * The CAB queue is started from the SWBA handler since | 
|  | 86 | * frames only go out on DTIM and to avoid possible races. | 
|  | 87 | */ | 
|  | 88 | ath9k_hw_set_interrupts(ah, 0); | 
|  | 89 |  | 
|  | 90 | /* | 
|  | 91 | * If there is anything in the mcastq, we want to set | 
|  | 92 | * the "more data" bit in the last item in the queue to | 
|  | 93 | * indicate that there is "more data". It makes sense to add | 
|  | 94 | * it here since you are *always* going to have | 
|  | 95 | * more data when adding to this queue, no matter where | 
|  | 96 | * you call from. | 
|  | 97 | */ | 
|  | 98 |  | 
|  | 99 | if (txq->axq_depth) { | 
|  | 100 | struct ath_buf *lbf; | 
|  | 101 | struct ieee80211_hdr *hdr; | 
|  | 102 |  | 
|  | 103 | /* | 
|  | 104 | * Add the "more data flag" to the last frame | 
|  | 105 | */ | 
|  | 106 |  | 
|  | 107 | lbf = list_entry(txq->axq_q.prev, struct ath_buf, list); | 
|  | 108 | hdr = (struct ieee80211_hdr *) | 
|  | 109 | ((struct sk_buff *)(lbf->bf_mpdu))->data; | 
|  | 110 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 
|  | 111 | } | 
|  | 112 |  | 
|  | 113 | /* | 
|  | 114 | * Now, concat the frame onto the queue | 
|  | 115 | */ | 
|  | 116 | list_splice_tail_init(head, &txq->axq_q); | 
|  | 117 | txq->axq_depth++; | 
|  | 118 | txq->axq_totalqueued++; | 
|  | 119 | txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); | 
|  | 120 |  | 
|  | 121 | DPRINTF(sc, ATH_DBG_QUEUE, | 
|  | 122 | "%s: txq depth = %d\n", __func__, txq->axq_depth); | 
|  | 123 | if (txq->axq_link != NULL) { | 
|  | 124 | *txq->axq_link = bf->bf_daddr; | 
|  | 125 | DPRINTF(sc, ATH_DBG_XMIT, | 
|  | 126 | "%s: link[%u](%p)=%llx (%p)\n", | 
|  | 127 | __func__, | 
|  | 128 | txq->axq_qnum, txq->axq_link, | 
|  | 129 | ito64(bf->bf_daddr), bf->bf_desc); | 
|  | 130 | } | 
|  | 131 | txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); | 
|  | 132 | ath9k_hw_set_interrupts(ah, sc->sc_imask); | 
|  | 133 | } | 
|  | 134 |  | 
|  | 135 | /* | 
|  | 136 | * Insert a chain of ath_buf (descriptors) on a txq and | 
|  | 137 | * assume the descriptors are already chained together by caller. | 
|  | 138 | * NB: must be called with txq lock held | 
|  | 139 | */ | 
|  | 140 |  | 
|  | 141 | static void ath_tx_txqaddbuf(struct ath_softc *sc, | 
|  | 142 | struct ath_txq *txq, struct list_head *head) | 
|  | 143 | { | 
|  | 144 | struct ath_hal *ah = sc->sc_ah; | 
|  | 145 | struct ath_buf *bf; | 
|  | 146 | /* | 
|  | 147 | * Insert the frame on the outbound list and | 
|  | 148 | * pass it on to the hardware. | 
|  | 149 | */ | 
|  | 150 |  | 
|  | 151 | if (list_empty(head)) | 
|  | 152 | return; | 
|  | 153 |  | 
|  | 154 | bf = list_first_entry(head, struct ath_buf, list); | 
|  | 155 |  | 
|  | 156 | list_splice_tail_init(head, &txq->axq_q); | 
|  | 157 | txq->axq_depth++; | 
|  | 158 | txq->axq_totalqueued++; | 
|  | 159 | txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); | 
|  | 160 |  | 
|  | 161 | DPRINTF(sc, ATH_DBG_QUEUE, | 
|  | 162 | "%s: txq depth = %d\n", __func__, txq->axq_depth); | 
|  | 163 |  | 
|  | 164 | if (txq->axq_link == NULL) { | 
|  | 165 | ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); | 
|  | 166 | DPRINTF(sc, ATH_DBG_XMIT, | 
|  | 167 | "%s: TXDP[%u] = %llx (%p)\n", | 
|  | 168 | __func__, txq->axq_qnum, | 
|  | 169 | ito64(bf->bf_daddr), bf->bf_desc); | 
|  | 170 | } else { | 
|  | 171 | *txq->axq_link = bf->bf_daddr; | 
|  | 172 | DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n", | 
|  | 173 | __func__, | 
|  | 174 | txq->axq_qnum, txq->axq_link, | 
|  | 175 | ito64(bf->bf_daddr), bf->bf_desc); | 
|  | 176 | } | 
|  | 177 | txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); | 
|  | 178 | ath9k_hw_txstart(ah, txq->axq_qnum); | 
|  | 179 | } | 
|  | 180 |  | 
|  | 181 | /* Get transmit rate index using rate in Kbps */ | 
|  | 182 |  | 
|  | 183 | static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate) | 
|  | 184 | { | 
|  | 185 | int i; | 
|  | 186 | int ndx = 0; | 
|  | 187 |  | 
|  | 188 | for (i = 0; i < rt->rateCount; i++) { | 
|  | 189 | if (rt->info[i].rateKbps == rate) { | 
|  | 190 | ndx = i; | 
|  | 191 | break; | 
|  | 192 | } | 
|  | 193 | } | 
|  | 194 |  | 
|  | 195 | return ndx; | 
|  | 196 | } | 
|  | 197 |  | 
|  | 198 | /* Check if it's okay to send out aggregates */ | 
|  | 199 |  | 
|  | 200 | static int ath_aggr_query(struct ath_softc *sc, | 
|  | 201 | struct ath_node *an, u8 tidno) | 
|  | 202 | { | 
|  | 203 | struct ath_atx_tid *tid; | 
|  | 204 | tid = ATH_AN_2_TID(an, tidno); | 
|  | 205 |  | 
|  | 206 | if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress) | 
|  | 207 | return 1; | 
|  | 208 | else | 
|  | 209 | return 0; | 
|  | 210 | } | 
|  | 211 |  | 
|  | 212 | static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr) | 
|  | 213 | { | 
|  | 214 | enum ath9k_pkt_type htype; | 
|  | 215 | __le16 fc; | 
|  | 216 |  | 
|  | 217 | fc = hdr->frame_control; | 
|  | 218 |  | 
|  | 219 | /* Calculate Atheros packet type from IEEE80211 packet header */ | 
|  | 220 |  | 
|  | 221 | if (ieee80211_is_beacon(fc)) | 
|  | 222 | htype = ATH9K_PKT_TYPE_BEACON; | 
|  | 223 | else if (ieee80211_is_probe_resp(fc)) | 
|  | 224 | htype = ATH9K_PKT_TYPE_PROBE_RESP; | 
|  | 225 | else if (ieee80211_is_atim(fc)) | 
|  | 226 | htype = ATH9K_PKT_TYPE_ATIM; | 
|  | 227 | else if (ieee80211_is_pspoll(fc)) | 
|  | 228 | htype = ATH9K_PKT_TYPE_PSPOLL; | 
|  | 229 | else | 
|  | 230 | htype = ATH9K_PKT_TYPE_NORMAL; | 
|  | 231 |  | 
|  | 232 | return htype; | 
|  | 233 | } | 
|  | 234 |  | 
|  | 235 | static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl) | 
|  | 236 | { | 
|  | 237 | struct ieee80211_hdr *hdr; | 
|  | 238 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 
|  | 239 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 240 | __le16 fc; | 
|  | 241 |  | 
|  | 242 | hdr = (struct ieee80211_hdr *)skb->data; | 
|  | 243 | fc = hdr->frame_control; | 
|  | 244 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; | 
|  | 245 |  | 
|  | 246 | if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) { | 
|  | 247 | txctl->use_minrate = 1; | 
|  | 248 | txctl->min_rate = tx_info_priv->min_rate; | 
|  | 249 | } else if (ieee80211_is_data(fc)) { | 
|  | 250 | if (ieee80211_is_nullfunc(fc) || | 
|  | 251 | /* Port Access Entity (IEEE 802.1X) */ | 
|  | 252 | (skb->protocol == cpu_to_be16(0x888E))) { | 
|  | 253 | txctl->use_minrate = 1; | 
|  | 254 | txctl->min_rate = tx_info_priv->min_rate; | 
|  | 255 | } | 
|  | 256 | if (is_multicast_ether_addr(hdr->addr1)) | 
|  | 257 | txctl->mcast_rate = tx_info_priv->min_rate; | 
|  | 258 | } | 
|  | 259 |  | 
|  | 260 | } | 
|  | 261 |  | 
|  | 262 | /* This function will setup additional txctl information, mostly rate stuff */ | 
|  | 263 | /* FIXME: seqno, ps */ | 
|  | 264 | static int ath_tx_prepare(struct ath_softc *sc, | 
|  | 265 | struct sk_buff *skb, | 
|  | 266 | struct ath_tx_control *txctl) | 
|  | 267 | { | 
|  | 268 | struct ieee80211_hw *hw = sc->hw; | 
|  | 269 | struct ieee80211_hdr *hdr; | 
|  | 270 | struct ath_rc_series *rcs; | 
|  | 271 | struct ath_txq *txq = NULL; | 
|  | 272 | const struct ath9k_rate_table *rt; | 
|  | 273 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 
|  | 274 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 275 | int hdrlen; | 
|  | 276 | u8 rix, antenna; | 
|  | 277 | __le16 fc; | 
|  | 278 | u8 *qc; | 
|  | 279 |  | 
|  | 280 | memset(txctl, 0, sizeof(struct ath_tx_control)); | 
|  | 281 |  | 
|  | 282 | txctl->dev = sc; | 
|  | 283 | hdr = (struct ieee80211_hdr *)skb->data; | 
|  | 284 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 
|  | 285 | fc = hdr->frame_control; | 
|  | 286 |  | 
|  | 287 | rt = sc->sc_currates; | 
|  | 288 | BUG_ON(!rt); | 
|  | 289 |  | 
|  | 290 | /* Fill misc fields */ | 
|  | 291 |  | 
|  | 292 | spin_lock_bh(&sc->node_lock); | 
|  | 293 | txctl->an = ath_node_get(sc, hdr->addr1); | 
|  | 294 | /* create a temp node if the node is not already there */ | 
|  | 295 | if (!txctl->an) | 
|  | 296 | txctl->an = ath_node_attach(sc, hdr->addr1, 0); | 
|  | 297 | spin_unlock_bh(&sc->node_lock); | 
|  | 298 |  | 
|  | 299 | if (ieee80211_is_data_qos(fc)) { | 
|  | 300 | qc = ieee80211_get_qos_ctl(hdr); | 
|  | 301 | txctl->tidno = qc[0] & 0xf; | 
|  | 302 | } | 
|  | 303 |  | 
|  | 304 | txctl->if_id = 0; | 
|  | 305 | txctl->nextfraglen = 0; | 
|  | 306 | txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3); | 
|  | 307 | txctl->txpower = MAX_RATE_POWER; /* FIXME */ | 
|  | 308 |  | 
|  | 309 | /* Fill Key related fields */ | 
|  | 310 |  | 
|  | 311 | txctl->keytype = ATH9K_KEY_TYPE_CLEAR; | 
|  | 312 | txctl->keyix = ATH9K_TXKEYIX_INVALID; | 
|  | 313 |  | 
|  | 314 | if (tx_info->control.hw_key) { | 
|  | 315 | txctl->keyix = tx_info->control.hw_key->hw_key_idx; | 
|  | 316 | txctl->frmlen += tx_info->control.icv_len; | 
|  | 317 |  | 
| Senthil Balasubramanian | d0be7cc | 2008-09-17 12:39:49 +0530 | [diff] [blame] | 318 | if (tx_info->control.hw_key->alg == ALG_WEP) | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 319 | txctl->keytype = ATH9K_KEY_TYPE_WEP; | 
| Senthil Balasubramanian | d0be7cc | 2008-09-17 12:39:49 +0530 | [diff] [blame] | 320 | else if (tx_info->control.hw_key->alg == ALG_TKIP) | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 321 | txctl->keytype = ATH9K_KEY_TYPE_TKIP; | 
| Senthil Balasubramanian | d0be7cc | 2008-09-17 12:39:49 +0530 | [diff] [blame] | 322 | else if (tx_info->control.hw_key->alg == ALG_CCMP) | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 323 | txctl->keytype = ATH9K_KEY_TYPE_AES; | 
|  | 324 | } | 
|  | 325 |  | 
|  | 326 | /* Fill packet type */ | 
|  | 327 |  | 
|  | 328 | txctl->atype = get_hal_packet_type(hdr); | 
|  | 329 |  | 
|  | 330 | /* Fill qnum */ | 
|  | 331 |  | 
|  | 332 | txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); | 
|  | 333 | txq = &sc->sc_txq[txctl->qnum]; | 
|  | 334 | spin_lock_bh(&txq->axq_lock); | 
|  | 335 |  | 
|  | 336 | /* Try to avoid running out of descriptors */ | 
|  | 337 | if (txq->axq_depth >= (ATH_TXBUF - 20)) { | 
|  | 338 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 339 | "%s: TX queue: %d is full, depth: %d\n", | 
|  | 340 | __func__, | 
|  | 341 | txctl->qnum, | 
|  | 342 | txq->axq_depth); | 
|  | 343 | ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); | 
|  | 344 | txq->stopped = 1; | 
|  | 345 | spin_unlock_bh(&txq->axq_lock); | 
|  | 346 | return -1; | 
|  | 347 | } | 
|  | 348 |  | 
|  | 349 | spin_unlock_bh(&txq->axq_lock); | 
|  | 350 |  | 
|  | 351 | /* Fill rate */ | 
|  | 352 |  | 
|  | 353 | fill_min_rates(skb, txctl); | 
|  | 354 |  | 
|  | 355 | /* Fill flags */ | 
|  | 356 |  | 
|  | 357 | txctl->flags = ATH9K_TXDESC_CLRDMASK;    /* needed for crypto errors */ | 
|  | 358 |  | 
|  | 359 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) | 
| Jouni Malinen | 9aab3e3 | 2008-08-11 14:01:51 +0300 | [diff] [blame] | 360 | txctl->flags |= ATH9K_TXDESC_NOACK; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 361 | if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) | 
| Jouni Malinen | 9aab3e3 | 2008-08-11 14:01:51 +0300 | [diff] [blame] | 362 | txctl->flags |= ATH9K_TXDESC_RTSENA; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 363 |  | 
|  | 364 | /* | 
|  | 365 | * Setup for rate calculations. | 
|  | 366 | */ | 
|  | 367 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; | 
|  | 368 | rcs = tx_info_priv->rcs; | 
|  | 369 |  | 
|  | 370 | if (ieee80211_is_data(fc) && !txctl->use_minrate) { | 
|  | 371 |  | 
|  | 372 | /* Enable HT only for DATA frames and not for EAPOL */ | 
|  | 373 | txctl->ht = (hw->conf.ht_conf.ht_supported && | 
|  | 374 | (tx_info->flags & IEEE80211_TX_CTL_AMPDU)); | 
|  | 375 |  | 
|  | 376 | if (is_multicast_ether_addr(hdr->addr1)) { | 
|  | 377 | rcs[0].rix = (u8) | 
|  | 378 | ath_tx_findindex(rt, txctl->mcast_rate); | 
|  | 379 |  | 
|  | 380 | /* | 
|  | 381 | * mcast packets are not re-tried. | 
|  | 382 | */ | 
|  | 383 | rcs[0].tries = 1; | 
|  | 384 | } | 
|  | 385 | /* For HT capable stations, we save tidno for later use. | 
|  | 386 | * We also override seqno set by upper layer with the one | 
|  | 387 | * in tx aggregation state. | 
|  | 388 | * | 
|  | 389 | * First, the fragmentation state is determined. | 
|  | 390 | * If fragmentation is on, the sequence number is | 
|  | 391 | * not overridden, since it has been | 
|  | 392 | * incremented by the fragmentation routine. | 
|  | 393 | */ | 
|  | 394 | if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && | 
|  | 395 | txctl->ht && sc->sc_txaggr) { | 
|  | 396 | struct ath_atx_tid *tid; | 
|  | 397 |  | 
|  | 398 | tid = ATH_AN_2_TID(txctl->an, txctl->tidno); | 
|  | 399 |  | 
|  | 400 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << | 
|  | 401 | IEEE80211_SEQ_SEQ_SHIFT); | 
|  | 402 | txctl->seqno = tid->seq_next; | 
|  | 403 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | 
|  | 404 | } | 
|  | 405 | } else { | 
|  | 406 | /* for management and control frames, | 
|  | 407 | * or for NULL and EAPOL frames */ | 
|  | 408 | if (txctl->min_rate) | 
|  | 409 | rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate); | 
|  | 410 | else | 
| Sujith | 86b89ee | 2008-08-07 10:54:57 +0530 | [diff] [blame] | 411 | rcs[0].rix = 0; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 412 | rcs[0].tries = ATH_MGT_TXMAXTRY; | 
|  | 413 | } | 
|  | 414 | rix = rcs[0].rix; | 
|  | 415 |  | 
|  | 416 | /* | 
|  | 417 | * Calculate duration.  This logically belongs in the 802.11 | 
|  | 418 | * layer but it lacks sufficient information to calculate it. | 
|  | 419 | */ | 
|  | 420 | if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) { | 
|  | 421 | u16 dur; | 
|  | 422 | /* | 
|  | 423 | * XXX not right with fragmentation. | 
|  | 424 | */ | 
|  | 425 | if (sc->sc_flags & ATH_PREAMBLE_SHORT) | 
|  | 426 | dur = rt->info[rix].spAckDuration; | 
|  | 427 | else | 
|  | 428 | dur = rt->info[rix].lpAckDuration; | 
|  | 429 |  | 
|  | 430 | if (le16_to_cpu(hdr->frame_control) & | 
|  | 431 | IEEE80211_FCTL_MOREFRAGS) { | 
|  | 432 | dur += dur;  /* Add additional 'SIFS + ACK' */ | 
|  | 433 |  | 
|  | 434 | /* | 
|  | 435 | ** Compute size of next fragment in order to compute | 
|  | 436 | ** durations needed to update NAV. | 
|  | 437 | ** The last fragment uses the ACK duration only. | 
|  | 438 | ** Add time for next fragment. | 
|  | 439 | */ | 
|  | 440 | dur += ath9k_hw_computetxtime(sc->sc_ah, rt, | 
|  | 441 | txctl->nextfraglen, | 
|  | 442 | rix, sc->sc_flags & ATH_PREAMBLE_SHORT); | 
|  | 443 | } | 
|  | 444 |  | 
|  | 445 | if (ieee80211_has_morefrags(fc) || | 
|  | 446 | (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { | 
|  | 447 | /* | 
|  | 448 | **  Force hardware to use computed duration for next | 
|  | 449 | **  fragment by disabling multi-rate retry, which | 
|  | 450 | **  updates duration based on the multi-rate | 
|  | 451 | **  duration table. | 
|  | 452 | */ | 
|  | 453 | rcs[1].tries = rcs[2].tries = rcs[3].tries = 0; | 
|  | 454 | rcs[1].rix = rcs[2].rix = rcs[3].rix = 0; | 
|  | 455 | /* reset tries but keep rate index */ | 
|  | 456 | rcs[0].tries = ATH_TXMAXTRY; | 
|  | 457 | } | 
|  | 458 |  | 
|  | 459 | hdr->duration_id = cpu_to_le16(dur); | 
|  | 460 | } | 
|  | 461 |  | 
|  | 462 | /* | 
|  | 463 | * Determine if a tx interrupt should be generated for | 
|  | 464 | * this descriptor.  We take a tx interrupt to reap | 
|  | 465 | * descriptors when the h/w hits an EOL condition or | 
|  | 466 | * when the descriptor is specifically marked to generate | 
|  | 467 | * an interrupt.  We periodically mark descriptors in this | 
|  | 468 | * way to ensure timely replenishing of the supply needed | 
|  | 469 | * for sending frames.  Deferring interrupts reduces system | 
|  | 470 | * load and potentially allows more concurrent work to be | 
|  | 471 | * done but if done too aggressively can cause senders to | 
|  | 472 | * back up. | 
|  | 473 | * | 
|  | 474 | * NB: use >= to deal with sc_txintrperiod changing | 
|  | 475 | *     dynamically through sysctl. | 
|  | 476 | */ | 
|  | 477 | spin_lock_bh(&txq->axq_lock); | 
|  | 478 | if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) { | 
|  | 479 | txctl->flags |= ATH9K_TXDESC_INTREQ; | 
|  | 480 | txq->axq_intrcnt = 0; | 
|  | 481 | } | 
|  | 482 | spin_unlock_bh(&txq->axq_lock); | 
|  | 483 |  | 
|  | 484 | if (is_multicast_ether_addr(hdr->addr1)) { | 
|  | 485 | antenna = sc->sc_mcastantenna + 1; | 
|  | 486 | sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; | 
|  | 487 | } else | 
|  | 488 | antenna = sc->sc_txantenna; | 
|  | 489 |  | 
|  | 490 | #ifdef USE_LEGACY_HAL | 
|  | 491 | txctl->antenna = antenna; | 
|  | 492 | #endif | 
|  | 493 | return 0; | 
|  | 494 | } | 
|  | 495 |  | 
|  | 496 | /* To complete a chain of buffers associated with a frame */ | 
|  | 497 |  | 
|  | 498 | static void ath_tx_complete_buf(struct ath_softc *sc, | 
|  | 499 | struct ath_buf *bf, | 
|  | 500 | struct list_head *bf_q, | 
|  | 501 | int txok, int sendbar) | 
|  | 502 | { | 
|  | 503 | struct sk_buff *skb = bf->bf_mpdu; | 
|  | 504 | struct ath_xmit_status tx_status; | 
|  | 505 | dma_addr_t *pa; | 
|  | 506 |  | 
|  | 507 | /* | 
|  | 508 | * Set retry information. | 
|  | 509 | * NB: Don't use the information in the descriptor, because the frame | 
|  | 510 | * could be software retried. | 
|  | 511 | */ | 
|  | 512 | tx_status.retries = bf->bf_retries; | 
|  | 513 | tx_status.flags = 0; | 
|  | 514 |  | 
|  | 515 | if (sendbar) | 
|  | 516 | tx_status.flags = ATH_TX_BAR; | 
|  | 517 |  | 
|  | 518 | if (!txok) { | 
|  | 519 | tx_status.flags |= ATH_TX_ERROR; | 
|  | 520 |  | 
|  | 521 | if (bf->bf_isxretried) | 
|  | 522 | tx_status.flags |= ATH_TX_XRETRY; | 
|  | 523 | } | 
|  | 524 | /* Unmap this frame */ | 
|  | 525 | pa = get_dma_mem_context(bf, bf_dmacontext); | 
|  | 526 | pci_unmap_single(sc->pdev, | 
|  | 527 | *pa, | 
|  | 528 | skb->len, | 
|  | 529 | PCI_DMA_TODEVICE); | 
|  | 530 | /* complete this frame */ | 
|  | 531 | ath_tx_complete(sc, skb, &tx_status, bf->bf_node); | 
|  | 532 |  | 
|  | 533 | /* | 
|  | 534 | * Return the list of ath_buf of this mpdu to the free queue | 
|  | 535 | */ | 
|  | 536 | spin_lock_bh(&sc->sc_txbuflock); | 
|  | 537 | list_splice_tail_init(bf_q, &sc->sc_txbuf); | 
|  | 538 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 539 | } | 
|  | 540 |  | 
|  | 541 | /* | 
|  | 542 | * queue up a dest/ac pair for tx scheduling | 
|  | 543 | * NB: must be called with txq lock held | 
|  | 544 | */ | 
|  | 545 |  | 
|  | 546 | static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) | 
|  | 547 | { | 
|  | 548 | struct ath_atx_ac *ac = tid->ac; | 
|  | 549 |  | 
|  | 550 | /* | 
|  | 551 | * if tid is paused, hold off | 
|  | 552 | */ | 
|  | 553 | if (tid->paused) | 
|  | 554 | return; | 
|  | 555 |  | 
|  | 556 | /* | 
|  | 557 | * add tid to ac at most once | 
|  | 558 | */ | 
|  | 559 | if (tid->sched) | 
|  | 560 | return; | 
|  | 561 |  | 
|  | 562 | tid->sched = true; | 
|  | 563 | list_add_tail(&tid->list, &ac->tid_q); | 
|  | 564 |  | 
|  | 565 | /* | 
|  | 566 | * add node ac to txq at most once | 
|  | 567 | */ | 
|  | 568 | if (ac->sched) | 
|  | 569 | return; | 
|  | 570 |  | 
|  | 571 | ac->sched = true; | 
|  | 572 | list_add_tail(&ac->list, &txq->axq_acq); | 
|  | 573 | } | 
|  | 574 |  | 
|  | 575 | /* pause a tid */ | 
|  | 576 |  | 
|  | 577 | static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | 
|  | 578 | { | 
|  | 579 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | 
|  | 580 |  | 
|  | 581 | spin_lock_bh(&txq->axq_lock); | 
|  | 582 |  | 
|  | 583 | tid->paused++; | 
|  | 584 |  | 
|  | 585 | spin_unlock_bh(&txq->axq_lock); | 
|  | 586 | } | 
|  | 587 |  | 
|  | 588 | /* resume a tid and schedule aggregate */ | 
|  | 589 |  | 
|  | 590 | void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | 
|  | 591 | { | 
|  | 592 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | 
|  | 593 |  | 
|  | 594 | ASSERT(tid->paused > 0); | 
|  | 595 | spin_lock_bh(&txq->axq_lock); | 
|  | 596 |  | 
|  | 597 | tid->paused--; | 
|  | 598 |  | 
|  | 599 | if (tid->paused > 0) | 
|  | 600 | goto unlock; | 
|  | 601 |  | 
|  | 602 | if (list_empty(&tid->buf_q)) | 
|  | 603 | goto unlock; | 
|  | 604 |  | 
|  | 605 | /* | 
|  | 606 | * Add this TID to scheduler and try to send out aggregates | 
|  | 607 | */ | 
|  | 608 | ath_tx_queue_tid(txq, tid); | 
|  | 609 | ath_txq_schedule(sc, txq); | 
|  | 610 | unlock: | 
|  | 611 | spin_unlock_bh(&txq->axq_lock); | 
|  | 612 | } | 
|  | 613 |  | 
|  | 614 | /* Compute the number of bad frames */ | 
|  | 615 |  | 
|  | 616 | static int ath_tx_num_badfrms(struct ath_softc *sc, | 
|  | 617 | struct ath_buf *bf, int txok) | 
|  | 618 | { | 
|  | 619 | struct ath_node *an = bf->bf_node; | 
|  | 620 | int isnodegone = (an->an_flags & ATH_NODE_CLEAN); | 
|  | 621 | struct ath_buf *bf_last = bf->bf_lastbf; | 
|  | 622 | struct ath_desc *ds = bf_last->bf_desc; | 
|  | 623 | u16 seq_st = 0; | 
|  | 624 | u32 ba[WME_BA_BMP_SIZE >> 5]; | 
|  | 625 | int ba_index; | 
|  | 626 | int nbad = 0; | 
|  | 627 | int isaggr = 0; | 
|  | 628 |  | 
|  | 629 | if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) | 
|  | 630 | return 0; | 
|  | 631 |  | 
|  | 632 | isaggr = bf->bf_isaggr; | 
|  | 633 | if (isaggr) { | 
|  | 634 | seq_st = ATH_DS_BA_SEQ(ds); | 
|  | 635 | memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); | 
|  | 636 | } | 
|  | 637 |  | 
|  | 638 | while (bf) { | 
|  | 639 | ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); | 
|  | 640 | if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) | 
|  | 641 | nbad++; | 
|  | 642 |  | 
|  | 643 | bf = bf->bf_next; | 
|  | 644 | } | 
|  | 645 |  | 
|  | 646 | return nbad; | 
|  | 647 | } | 
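/*
 * Worked example (numbers assumed): for an aggregate whose block-ack
 * reports seq_st = 50 with bits 0 and 2 set in the bitmap, subframes
 * 50 and 52 are treated as delivered while subframe 51 contributes to
 * the nbad count returned to the rate control module.
 */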
|  | 648 |  | 
|  | 649 | static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) | 
|  | 650 | { | 
|  | 651 | struct sk_buff *skb; | 
|  | 652 | struct ieee80211_hdr *hdr; | 
|  | 653 |  | 
|  | 654 | bf->bf_isretried = 1; | 
|  | 655 | bf->bf_retries++; | 
|  | 656 |  | 
|  | 657 | skb = bf->bf_mpdu; | 
|  | 658 | hdr = (struct ieee80211_hdr *)skb->data; | 
|  | 659 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); | 
|  | 660 | } | 
|  | 661 |  | 
|  | 662 | /* Update block ack window */ | 
|  | 663 |  | 
|  | 664 | static void ath_tx_update_baw(struct ath_softc *sc, | 
|  | 665 | struct ath_atx_tid *tid, int seqno) | 
|  | 666 | { | 
|  | 667 | int index, cindex; | 
|  | 668 |  | 
|  | 669 | index  = ATH_BA_INDEX(tid->seq_start, seqno); | 
|  | 670 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | 
|  | 671 |  | 
|  | 672 | tid->tx_buf[cindex] = NULL; | 
|  | 673 |  | 
|  | 674 | while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { | 
|  | 675 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | 
|  | 676 | INCR(tid->baw_head, ATH_TID_MAX_BUFS); | 
|  | 677 | } | 
|  | 678 | } | 
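/*
 * Worked example (numbers assumed): with seq_start = 100 and
 * baw_head = 0, completing seqno 102 gives index = 2 and cindex = 2,
 * so tx_buf[2] is cleared but the window does not move because the
 * slot for seqno 100 is still outstanding.  Once seqno 100 completes,
 * the loop above slides seq_start and baw_head past every contiguous
 * completed slot.
 */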
|  | 679 |  | 
|  | 680 | /* | 
|  | 681 | * ath_pkt_duration - compute packet duration (NB: not NAV) | 
|  | 682 | * | 
|  | 683 | * rix - rate index | 
|  | 684 | * pktlen - total bytes (delims + data + fcs + pads + pad delims) | 
|  | 685 | * width  - 0 for 20 MHz, 1 for 40 MHz | 
|  | 686 | * half_gi - use the 3.6 us half-GI symbol time instead of 4 us | 
|  | 687 | */ | 
|  | 688 |  | 
|  | 689 | static u32 ath_pkt_duration(struct ath_softc *sc, | 
|  | 690 | u8 rix, | 
|  | 691 | struct ath_buf *bf, | 
|  | 692 | int width, | 
|  | 693 | int half_gi, | 
|  | 694 | bool shortPreamble) | 
|  | 695 | { | 
|  | 696 | const struct ath9k_rate_table *rt = sc->sc_currates; | 
|  | 697 | u32 nbits, nsymbits, duration, nsymbols; | 
|  | 698 | u8 rc; | 
|  | 699 | int streams, pktlen; | 
|  | 700 |  | 
|  | 701 | pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen; | 
|  | 702 | rc = rt->info[rix].rateCode; | 
|  | 703 |  | 
|  | 704 | /* | 
|  | 705 | * for legacy rates, use old function to compute packet duration | 
|  | 706 | */ | 
|  | 707 | if (!IS_HT_RATE(rc)) | 
|  | 708 | return ath9k_hw_computetxtime(sc->sc_ah, | 
|  | 709 | rt, | 
|  | 710 | pktlen, | 
|  | 711 | rix, | 
|  | 712 | shortPreamble); | 
|  | 713 | /* | 
|  | 714 | * find number of symbols: PLCP + data | 
|  | 715 | */ | 
|  | 716 | nbits = (pktlen << 3) + OFDM_PLCP_BITS; | 
|  | 717 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; | 
|  | 718 | nsymbols = (nbits + nsymbits - 1) / nsymbits; | 
|  | 719 |  | 
|  | 720 | if (!half_gi) | 
|  | 721 | duration = SYMBOL_TIME(nsymbols); | 
|  | 722 | else | 
|  | 723 | duration = SYMBOL_TIME_HALFGI(nsymbols); | 
|  | 724 |  | 
|  | 725 | /* | 
|  | 726 | * add up duration for legacy/ht training and signal fields | 
|  | 727 | */ | 
|  | 728 | streams = HT_RC_2_STREAMS(rc); | 
|  | 729 | duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); | 
|  | 730 | return duration; | 
|  | 731 | } | 
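/*
 * Worked example (values assumed): a 1500 byte frame at rate code 0x87
 * (MCS 7, one stream), 20 MHz, long GI:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS      = 12022
 *   nsymbits = bits_per_symbol[7][0]          = 260
 *   nsymbols = (12022 + 259) / 260            = 47
 *   duration = SYMBOL_TIME(47) + L_STF + L_LTF + L_SIG + HT_SIG +
 *              HT_STF + HT_LTF(1)             = 188 + 36 = 224 us
 */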
|  | 732 |  | 
|  | 733 | /* Rate module function to set rate related fields in tx descriptor */ | 
|  | 734 |  | 
|  | 735 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) | 
|  | 736 | { | 
|  | 737 | struct ath_hal *ah = sc->sc_ah; | 
|  | 738 | const struct ath9k_rate_table *rt; | 
|  | 739 | struct ath_desc *ds = bf->bf_desc; | 
|  | 740 | struct ath_desc *lastds = bf->bf_lastbf->bf_desc; | 
|  | 741 | struct ath9k_11n_rate_series series[4]; | 
|  | 742 | int i, flags, rtsctsena = 0, dynamic_mimops = 0; | 
|  | 743 | u32 ctsduration = 0; | 
|  | 744 | u8 rix = 0, cix, ctsrate = 0; | 
|  | 745 | u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit; | 
|  | 746 | struct ath_node *an = (struct ath_node *) bf->bf_node; | 
|  | 747 |  | 
|  | 748 | /* | 
|  | 749 | * get the cix for the lowest valid rix. | 
|  | 750 | */ | 
|  | 751 | rt = sc->sc_currates; | 
|  | 752 | for (i = 4; i--;) { | 
|  | 753 | if (bf->bf_rcs[i].tries) { | 
|  | 754 | rix = bf->bf_rcs[i].rix; | 
|  | 755 | break; | 
|  | 756 | } | 
|  | 757 | } | 
|  | 758 | flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); | 
|  | 759 | cix = rt->info[rix].controlRate; | 
|  | 760 |  | 
|  | 761 | /* | 
|  | 762 | * If 802.11g protection is enabled, determine whether | 
|  | 763 | * to use RTS/CTS or just CTS.  Note that this is only | 
|  | 764 | * done for OFDM/HT unicast frames. | 
|  | 765 | */ | 
|  | 766 | if (sc->sc_protmode != PROT_M_NONE && | 
|  | 767 | (rt->info[rix].phy == PHY_OFDM || | 
|  | 768 | rt->info[rix].phy == PHY_HT) && | 
|  | 769 | (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { | 
|  | 770 | if (sc->sc_protmode == PROT_M_RTSCTS) | 
|  | 771 | flags = ATH9K_TXDESC_RTSENA; | 
|  | 772 | else if (sc->sc_protmode == PROT_M_CTSONLY) | 
|  | 773 | flags = ATH9K_TXDESC_CTSENA; | 
|  | 774 |  | 
|  | 775 | cix = rt->info[sc->sc_protrix].controlRate; | 
|  | 776 | rtsctsena = 1; | 
|  | 777 | } | 
|  | 778 |  | 
|  | 779 | /* For 11n, the default behavior is to enable RTS for | 
|  | 780 | * hw retried frames. We enable the global flag here and | 
|  | 781 | * let rate series flags determine which rates will actually | 
|  | 782 | * use RTS. | 
|  | 783 | */ | 
| Sujith | 60b67f5 | 2008-08-07 10:52:38 +0530 | [diff] [blame] | 784 | if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) { | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 785 | BUG_ON(!an); | 
|  | 786 | /* | 
|  | 787 | * 802.11g protection not needed, use our default behavior | 
|  | 788 | */ | 
|  | 789 | if (!rtsctsena) | 
|  | 790 | flags = ATH9K_TXDESC_RTSENA; | 
|  | 791 | /* | 
|  | 792 | * For dynamic MIMO PS, RTS needs to precede the first aggregate | 
|  | 793 | * and the second aggregate should not have any protection at all. | 
|  | 794 | */ | 
|  | 795 | if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) { | 
|  | 796 | if (!bf->bf_aggrburst) { | 
|  | 797 | flags = ATH9K_TXDESC_RTSENA; | 
|  | 798 | dynamic_mimops = 1; | 
|  | 799 | } else { | 
|  | 800 | flags = 0; | 
|  | 801 | } | 
|  | 802 | } | 
|  | 803 | } | 
|  | 804 |  | 
|  | 805 | /* | 
|  | 806 | * Set protection if aggregate protection on | 
|  | 807 | */ | 
|  | 808 | if (sc->sc_config.ath_aggr_prot && | 
|  | 809 | (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) { | 
|  | 810 | flags = ATH9K_TXDESC_RTSENA; | 
|  | 811 | cix = rt->info[sc->sc_protrix].controlRate; | 
|  | 812 | rtsctsena = 1; | 
|  | 813 | } | 
|  | 814 |  | 
|  | 815 | /* | 
|  | 816 | *  For AR5416 - RTS cannot be followed by a frame larger than 8K. | 
|  | 817 | */ | 
|  | 818 | if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) { | 
|  | 819 | /* | 
|  | 820 | * Ensure that, in the case of SM dynamic power save, | 
|  | 821 | * RTS is cleared while we are bursting the | 
|  | 822 | * second aggregate. | 
|  | 823 | */ | 
|  | 824 | flags &= ~(ATH9K_TXDESC_RTSENA); | 
|  | 825 | } | 
|  | 826 |  | 
|  | 827 | /* | 
|  | 828 | * CTS transmit rate is derived from the transmit rate | 
|  | 829 | * by looking in the h/w rate table.  We must also factor | 
|  | 830 | * in whether or not a short preamble is to be used. | 
|  | 831 | */ | 
|  | 832 | /* NB: cix is set above where RTS/CTS is enabled */ | 
|  | 833 | BUG_ON(cix == 0xff); | 
|  | 834 | ctsrate = rt->info[cix].rateCode | | 
|  | 835 | (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0); | 
|  | 836 |  | 
|  | 837 | /* | 
|  | 838 | * Setup HAL rate series | 
|  | 839 | */ | 
|  | 840 | memzero(series, sizeof(struct ath9k_11n_rate_series) * 4); | 
|  | 841 |  | 
|  | 842 | for (i = 0; i < 4; i++) { | 
|  | 843 | if (!bf->bf_rcs[i].tries) | 
|  | 844 | continue; | 
|  | 845 |  | 
|  | 846 | rix = bf->bf_rcs[i].rix; | 
|  | 847 |  | 
|  | 848 | series[i].Rate = rt->info[rix].rateCode | | 
|  | 849 | (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0); | 
|  | 850 |  | 
|  | 851 | series[i].Tries = bf->bf_rcs[i].tries; | 
|  | 852 |  | 
|  | 853 | series[i].RateFlags = ( | 
|  | 854 | (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ? | 
|  | 855 | ATH9K_RATESERIES_RTS_CTS : 0) | | 
|  | 856 | ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ? | 
|  | 857 | ATH9K_RATESERIES_2040 : 0) | | 
|  | 858 | ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ? | 
|  | 859 | ATH9K_RATESERIES_HALFGI : 0); | 
|  | 860 |  | 
|  | 861 | series[i].PktDuration = ath_pkt_duration( | 
|  | 862 | sc, rix, bf, | 
|  | 863 | (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0, | 
|  | 864 | (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG), | 
|  | 865 | bf->bf_shpreamble); | 
|  | 866 |  | 
|  | 867 | if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) && | 
|  | 868 | (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) { | 
|  | 869 | /* | 
|  | 870 | * When sending to an HT node that has enabled static | 
|  | 871 | * SM/MIMO power save, send at single stream rates but | 
|  | 872 | * use maximum allowed transmit chains per user, | 
|  | 873 | * hardware, regulatory, or country limits for | 
|  | 874 | * better range. | 
|  | 875 | */ | 
|  | 876 | series[i].ChSel = sc->sc_tx_chainmask; | 
|  | 877 | } else { | 
|  | 878 | if (bf->bf_ht) | 
|  | 879 | series[i].ChSel = | 
|  | 880 | ath_chainmask_sel_logic(sc, an); | 
|  | 881 | else | 
|  | 882 | series[i].ChSel = sc->sc_tx_chainmask; | 
|  | 883 | } | 
|  | 884 |  | 
|  | 885 | if (rtsctsena) | 
|  | 886 | series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; | 
|  | 887 |  | 
|  | 888 | /* | 
|  | 889 | * Set RTS for all rates if node is in dynamic powersave | 
|  | 890 | * mode and we are using dual stream rates. | 
|  | 891 | */ | 
|  | 892 | if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG)) | 
|  | 893 | series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; | 
|  | 894 | } | 
|  | 895 |  | 
|  | 896 | /* | 
|  | 897 | * For non-HT devices, calculate RTS/CTS duration in software | 
|  | 898 | * and disable multi-rate retry. | 
|  | 899 | */ | 
| Sujith | 60b67f5 | 2008-08-07 10:52:38 +0530 | [diff] [blame] | 900 | if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) { | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 901 | /* | 
|  | 902 | * Compute the transmit duration based on the frame | 
|  | 903 | * size and the size of an ACK frame.  We call into the | 
|  | 904 | * HAL to do the computation since it depends on the | 
|  | 905 | * characteristics of the actual PHY being used. | 
|  | 906 | * | 
|  | 907 | * NB: CTS is assumed the same size as an ACK so we can | 
|  | 908 | *     use the precalculated ACK durations. | 
|  | 909 | */ | 
|  | 910 | if (flags & ATH9K_TXDESC_RTSENA) {    /* SIFS + CTS */ | 
|  | 911 | ctsduration += bf->bf_shpreamble ? | 
|  | 912 | rt->info[cix].spAckDuration : | 
|  | 913 | rt->info[cix].lpAckDuration; | 
|  | 914 | } | 
|  | 915 |  | 
|  | 916 | ctsduration += series[0].PktDuration; | 
|  | 917 |  | 
|  | 918 | if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */ | 
|  | 919 | ctsduration += bf->bf_shpreamble ? | 
|  | 920 | rt->info[rix].spAckDuration : | 
|  | 921 | rt->info[rix].lpAckDuration; | 
|  | 922 | } | 
|  | 923 |  | 
|  | 924 | /* | 
|  | 925 | * Disable multi-rate retry when using RTS/CTS by clearing | 
|  | 926 | * series 1, 2 and 3. | 
|  | 927 | */ | 
|  | 928 | memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3); | 
|  | 929 | } | 
|  | 930 |  | 
|  | 931 | /* | 
|  | 932 | * set dur_update_en for l-sig computation except for PS-Poll frames | 
|  | 933 | */ | 
|  | 934 | ath9k_hw_set11n_ratescenario(ah, ds, lastds, | 
|  | 935 | !bf->bf_ispspoll, | 
|  | 936 | ctsrate, | 
|  | 937 | ctsduration, | 
|  | 938 | series, 4, flags); | 
|  | 939 | if (sc->sc_config.ath_aggr_prot && flags) | 
|  | 940 | ath9k_hw_set11n_burstduration(ah, ds, 8192); | 
|  | 941 | } | 
|  | 942 |  | 
|  | 943 | /* | 
|  | 944 | * Function to send a normal HT (non-AMPDU) frame | 
|  | 945 | * NB: must be called with txq lock held | 
|  | 946 | */ | 
|  | 947 |  | 
|  | 948 | static int ath_tx_send_normal(struct ath_softc *sc, | 
|  | 949 | struct ath_txq *txq, | 
|  | 950 | struct ath_atx_tid *tid, | 
|  | 951 | struct list_head *bf_head) | 
|  | 952 | { | 
|  | 953 | struct ath_buf *bf; | 
|  | 954 | struct sk_buff *skb; | 
|  | 955 | struct ieee80211_tx_info *tx_info; | 
|  | 956 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 957 |  | 
|  | 958 | BUG_ON(list_empty(bf_head)); | 
|  | 959 |  | 
|  | 960 | bf = list_first_entry(bf_head, struct ath_buf, list); | 
|  | 961 | bf->bf_isampdu = 0; /* regular HT frame */ | 
|  | 962 |  | 
|  | 963 | skb = (struct sk_buff *)bf->bf_mpdu; | 
|  | 964 | tx_info = IEEE80211_SKB_CB(skb); | 
|  | 965 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; | 
|  | 966 | memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); | 
|  | 967 |  | 
|  | 968 | /* update starting sequence number for subsequent ADDBA request */ | 
|  | 969 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | 
|  | 970 |  | 
|  | 971 | /* Queue to h/w without aggregation */ | 
|  | 972 | bf->bf_nframes = 1; | 
|  | 973 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | 
|  | 974 | ath_buf_set_rate(sc, bf); | 
|  | 975 | ath_tx_txqaddbuf(sc, txq, bf_head); | 
|  | 976 |  | 
|  | 977 | return 0; | 
|  | 978 | } | 
|  | 979 |  | 
|  | 980 | /* flush tid's software queue and send frames as non-ampdu's */ | 
|  | 981 |  | 
|  | 982 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | 
|  | 983 | { | 
|  | 984 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | 
|  | 985 | struct ath_buf *bf; | 
|  | 986 | struct list_head bf_head; | 
|  | 987 | INIT_LIST_HEAD(&bf_head); | 
|  | 988 |  | 
|  | 989 | ASSERT(tid->paused > 0); | 
|  | 990 | spin_lock_bh(&txq->axq_lock); | 
|  | 991 |  | 
|  | 992 | tid->paused--; | 
|  | 993 |  | 
|  | 994 | if (tid->paused > 0) { | 
|  | 995 | spin_unlock_bh(&txq->axq_lock); | 
|  | 996 | return; | 
|  | 997 | } | 
|  | 998 |  | 
|  | 999 | while (!list_empty(&tid->buf_q)) { | 
|  | 1000 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 
|  | 1001 | ASSERT(!bf->bf_isretried); | 
|  | 1002 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | 
|  | 1003 | ath_tx_send_normal(sc, txq, tid, &bf_head); | 
|  | 1004 | } | 
|  | 1005 |  | 
|  | 1006 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1007 | } | 
|  | 1008 |  | 
|  | 1009 | /* Completion routine of an aggregate */ | 
|  | 1010 |  | 
|  | 1011 | static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, | 
|  | 1012 | struct ath_txq *txq, | 
|  | 1013 | struct ath_buf *bf, | 
|  | 1014 | struct list_head *bf_q, | 
|  | 1015 | int txok) | 
|  | 1016 | { | 
|  | 1017 | struct ath_node *an = bf->bf_node; | 
|  | 1018 | struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno); | 
|  | 1019 | struct ath_buf *bf_last = bf->bf_lastbf; | 
|  | 1020 | struct ath_desc *ds = bf_last->bf_desc; | 
|  | 1021 | struct ath_buf *bf_next, *bf_lastq = NULL; | 
|  | 1022 | struct list_head bf_head, bf_pending; | 
|  | 1023 | u16 seq_st = 0; | 
|  | 1024 | u32 ba[WME_BA_BMP_SIZE >> 5]; | 
|  | 1025 | int isaggr, txfail, txpending, sendbar = 0, needreset = 0; | 
|  | 1026 | int isnodegone = (an->an_flags & ATH_NODE_CLEAN); | 
|  | 1027 |  | 
|  | 1028 | isaggr = bf->bf_isaggr; | 
|  | 1029 | if (isaggr) { | 
|  | 1030 | if (txok) { | 
|  | 1031 | if (ATH_DS_TX_BA(ds)) { | 
|  | 1032 | /* | 
|  | 1033 | * extract starting sequence and | 
|  | 1034 | * block-ack bitmap | 
|  | 1035 | */ | 
|  | 1036 | seq_st = ATH_DS_BA_SEQ(ds); | 
|  | 1037 | memcpy(ba, | 
|  | 1038 | ATH_DS_BA_BITMAP(ds), | 
|  | 1039 | WME_BA_BMP_SIZE >> 3); | 
|  | 1040 | } else { | 
|  | 1041 | memzero(ba, WME_BA_BMP_SIZE >> 3); | 
|  | 1042 |  | 
|  | 1043 | /* | 
|  | 1044 | * AR5416 can become deaf/mute when BA | 
|  | 1045 | * issue happens. Chip needs to be reset. | 
|  | 1046 | * But AP code may have synchronization issues | 
|  | 1047 | * when performing an internal reset in this routine. | 
|  | 1048 | * Only enable reset in STA mode for now. | 
|  | 1049 | */ | 
|  | 1050 | if (sc->sc_opmode == ATH9K_M_STA) | 
|  | 1051 | needreset = 1; | 
|  | 1052 | } | 
|  | 1053 | } else { | 
|  | 1054 | memzero(ba, WME_BA_BMP_SIZE >> 3); | 
|  | 1055 | } | 
|  | 1056 | } | 
|  | 1057 |  | 
|  | 1058 | INIT_LIST_HEAD(&bf_pending); | 
|  | 1059 | INIT_LIST_HEAD(&bf_head); | 
|  | 1060 |  | 
|  | 1061 | while (bf) { | 
|  | 1062 | txfail = txpending = 0; | 
|  | 1063 | bf_next = bf->bf_next; | 
|  | 1064 |  | 
|  | 1065 | if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { | 
|  | 1066 | /* transmit completion, subframe is | 
|  | 1067 | * acked by block ack */ | 
|  | 1068 | } else if (!isaggr && txok) { | 
|  | 1069 | /* transmit completion */ | 
|  | 1070 | } else { | 
|  | 1071 |  | 
|  | 1072 | if (!tid->cleanup_inprogress && !isnodegone && | 
|  | 1073 | ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { | 
|  | 1074 | if (bf->bf_retries < ATH_MAX_SW_RETRIES) { | 
|  | 1075 | ath_tx_set_retry(sc, bf); | 
|  | 1076 | txpending = 1; | 
|  | 1077 | } else { | 
|  | 1078 | bf->bf_isxretried = 1; | 
|  | 1079 | txfail = 1; | 
|  | 1080 | sendbar = 1; | 
|  | 1081 | } | 
|  | 1082 | } else { | 
|  | 1083 | /* | 
|  | 1084 | * cleanup in progress, just fail | 
|  | 1085 | * the un-acked sub-frames | 
|  | 1086 | */ | 
|  | 1087 | txfail = 1; | 
|  | 1088 | } | 
|  | 1089 | } | 
|  | 1090 | /* | 
|  | 1091 | * Remove ath_buf's of this sub-frame from aggregate queue. | 
|  | 1092 | */ | 
|  | 1093 | if (bf_next == NULL) {  /* last subframe in the aggregate */ | 
|  | 1094 | ASSERT(bf->bf_lastfrm == bf_last); | 
|  | 1095 |  | 
|  | 1096 | /* | 
|  | 1097 | * The last descriptor of the last sub frame could be | 
|  | 1098 | * a holding descriptor for h/w. If that's the case, | 
|  | 1099 | * bf->bf_lastfrm won't be in the bf_q. | 
|  | 1100 | * Make sure we handle bf_q properly here. | 
|  | 1101 | */ | 
|  | 1102 |  | 
|  | 1103 | if (!list_empty(bf_q)) { | 
|  | 1104 | bf_lastq = list_entry(bf_q->prev, | 
|  | 1105 | struct ath_buf, list); | 
|  | 1106 | list_cut_position(&bf_head, | 
|  | 1107 | bf_q, &bf_lastq->list); | 
|  | 1108 | } else { | 
|  | 1109 | /* | 
|  | 1110 | * XXX: if the last subframe has only one | 
|  | 1111 | * descriptor, which is also being used as | 
|  | 1112 | * a holding descriptor, then the ath_buf | 
|  | 1113 | * is not in the bf_q at all. | 
|  | 1114 | */ | 
|  | 1115 | INIT_LIST_HEAD(&bf_head); | 
|  | 1116 | } | 
|  | 1117 | } else { | 
|  | 1118 | ASSERT(!list_empty(bf_q)); | 
|  | 1119 | list_cut_position(&bf_head, | 
|  | 1120 | bf_q, &bf->bf_lastfrm->list); | 
|  | 1121 | } | 
|  | 1122 |  | 
|  | 1123 | if (!txpending) { | 
|  | 1124 | /* | 
|  | 1125 | * complete the acked-ones/xretried ones; update | 
|  | 1126 | * block-ack window | 
|  | 1127 | */ | 
|  | 1128 | spin_lock_bh(&txq->axq_lock); | 
|  | 1129 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | 
|  | 1130 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1131 |  | 
|  | 1132 | /* complete this sub-frame */ | 
|  | 1133 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); | 
|  | 1134 | } else { | 
|  | 1135 | /* | 
|  | 1136 | * retry the un-acked ones | 
|  | 1137 | */ | 
|  | 1138 | /* | 
|  | 1139 | * XXX: if the last descriptor is a holding descriptor, | 
|  | 1140 | * in order to requeue the frame to the software queue, we | 
|  | 1141 | * need to allocate a new descriptor and | 
|  | 1142 | * copy the content of the holding descriptor to it. | 
|  | 1143 | */ | 
|  | 1144 | if (bf->bf_next == NULL && | 
|  | 1145 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { | 
|  | 1146 | struct ath_buf *tbf; | 
|  | 1147 |  | 
|  | 1148 | /* allocate new descriptor */ | 
|  | 1149 | spin_lock_bh(&sc->sc_txbuflock); | 
|  | 1150 | ASSERT(!list_empty((&sc->sc_txbuf))); | 
|  | 1151 | tbf = list_first_entry(&sc->sc_txbuf, | 
|  | 1152 | struct ath_buf, list); | 
|  | 1153 | list_del(&tbf->list); | 
|  | 1154 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 1155 |  | 
|  | 1156 | ATH_TXBUF_RESET(tbf); | 
|  | 1157 |  | 
|  | 1158 | /* copy descriptor content */ | 
|  | 1159 | tbf->bf_mpdu = bf_last->bf_mpdu; | 
|  | 1160 | tbf->bf_node = bf_last->bf_node; | 
|  | 1161 | tbf->bf_buf_addr = bf_last->bf_buf_addr; | 
|  | 1162 | *(tbf->bf_desc) = *(bf_last->bf_desc); | 
|  | 1163 |  | 
|  | 1164 | /* link it to the frame */ | 
|  | 1165 | if (bf_lastq) { | 
|  | 1166 | bf_lastq->bf_desc->ds_link = | 
|  | 1167 | tbf->bf_daddr; | 
|  | 1168 | bf->bf_lastfrm = tbf; | 
|  | 1169 | ath9k_hw_cleartxdesc(sc->sc_ah, | 
|  | 1170 | bf->bf_lastfrm->bf_desc); | 
|  | 1171 | } else { | 
|  | 1172 | tbf->bf_state = bf_last->bf_state; | 
|  | 1173 | tbf->bf_lastfrm = tbf; | 
|  | 1174 | ath9k_hw_cleartxdesc(sc->sc_ah, | 
|  | 1175 | tbf->bf_lastfrm->bf_desc); | 
|  | 1176 |  | 
|  | 1177 | /* copy the DMA context */ | 
|  | 1178 | copy_dma_mem_context( | 
|  | 1179 | get_dma_mem_context(tbf, | 
|  | 1180 | bf_dmacontext), | 
|  | 1181 | get_dma_mem_context(bf_last, | 
|  | 1182 | bf_dmacontext)); | 
|  | 1183 | } | 
|  | 1184 | list_add_tail(&tbf->list, &bf_head); | 
|  | 1185 | } else { | 
|  | 1186 | /* | 
|  | 1187 | * Clear descriptor status words for | 
|  | 1188 | * software retry | 
|  | 1189 | */ | 
|  | 1190 | ath9k_hw_cleartxdesc(sc->sc_ah, | 
|  | 1191 | bf->bf_lastfrm->bf_desc); | 
|  | 1192 | } | 
|  | 1193 |  | 
|  | 1194 | /* | 
|  | 1195 | * Put this buffer to the temporary pending | 
|  | 1196 | * queue to retain ordering | 
|  | 1197 | */ | 
|  | 1198 | list_splice_tail_init(&bf_head, &bf_pending); | 
|  | 1199 | } | 
|  | 1200 |  | 
|  | 1201 | bf = bf_next; | 
|  | 1202 | } | 
|  | 1203 |  | 
|  | 1204 | /* | 
|  | 1205 | * The node is already gone and there is no more | 
|  | 1206 | * association with it. The node might have been freed, | 
|  | 1207 | * so any node access can result in a panic. Note that | 
|  | 1208 | * the tid is part of the node. | 
|  | 1209 | */ | 
|  | 1210 | if (isnodegone) | 
|  | 1211 | return; | 
|  | 1212 |  | 
|  | 1213 | if (tid->cleanup_inprogress) { | 
|  | 1214 | /* check to see if we're done with cleaning the h/w queue */ | 
|  | 1215 | spin_lock_bh(&txq->axq_lock); | 
|  | 1216 |  | 
|  | 1217 | if (tid->baw_head == tid->baw_tail) { | 
|  | 1218 | tid->addba_exchangecomplete = 0; | 
|  | 1219 | tid->addba_exchangeattempts = 0; | 
|  | 1220 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1221 |  | 
|  | 1222 | tid->cleanup_inprogress = false; | 
|  | 1223 |  | 
|  | 1224 | /* send buffered frames as singles */ | 
|  | 1225 | ath_tx_flush_tid(sc, tid); | 
|  | 1226 | } else | 
|  | 1227 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1228 |  | 
|  | 1229 | return; | 
|  | 1230 | } | 
|  | 1231 |  | 
|  | 1232 | /* | 
|  | 1233 | * prepend un-acked frames to the beginning of the pending frame queue | 
|  | 1234 | */ | 
|  | 1235 | if (!list_empty(&bf_pending)) { | 
|  | 1236 | spin_lock_bh(&txq->axq_lock); | 
|  | 1237 | /* Note: we _prepend_, we _do_not_ add to | 
|  | 1238 | * the end of the queue! */ | 
|  | 1239 | list_splice(&bf_pending, &tid->buf_q); | 
|  | 1240 | ath_tx_queue_tid(txq, tid); | 
|  | 1241 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | if (needreset) | 
|  | 1245 | ath_internal_reset(sc); | 
|  | 1246 |  | 
|  | 1247 | return; | 
|  | 1248 | } | 
|  | 1249 |  | 
|  | 1250 | /* Process completed xmit descriptors from the specified queue */ | 
|  | 1251 |  | 
|  | 1252 | static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | 
|  | 1253 | { | 
|  | 1254 | struct ath_hal *ah = sc->sc_ah; | 
|  | 1255 | struct ath_buf *bf, *lastbf, *bf_held = NULL; | 
|  | 1256 | struct list_head bf_head; | 
|  | 1257 | struct ath_desc *ds, *tmp_ds; | 
|  | 1258 | struct sk_buff *skb; | 
|  | 1259 | struct ieee80211_tx_info *tx_info; | 
|  | 1260 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 1261 | int nacked, txok, nbad = 0, isrifs = 0; | 
|  | 1262 | int status; | 
|  | 1263 |  | 
|  | 1264 | DPRINTF(sc, ATH_DBG_QUEUE, | 
|  | 1265 | "%s: tx queue %d (%x), link %p\n", __func__, | 
|  | 1266 | txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), | 
|  | 1267 | txq->axq_link); | 
|  | 1268 |  | 
|  | 1269 | nacked = 0; | 
|  | 1270 | for (;;) { | 
|  | 1271 | spin_lock_bh(&txq->axq_lock); | 
|  | 1272 | txq->axq_intrcnt = 0; /* reset periodic desc intr count */ | 
|  | 1273 | if (list_empty(&txq->axq_q)) { | 
|  | 1274 | txq->axq_link = NULL; | 
|  | 1275 | txq->axq_linkbuf = NULL; | 
|  | 1276 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1277 | break; | 
|  | 1278 | } | 
|  | 1279 | bf = list_first_entry(&txq->axq_q, struct ath_buf, list); | 
|  | 1280 |  | 
|  | 1281 | /* | 
|  | 1282 | * There is a race condition that a BH gets scheduled | 
|  | 1283 | * after sw writes TxE and before hw re-loads the last | 
|  | 1284 | * descriptor to get the newly chained one. | 
|  | 1285 | * Software must keep the last DONE descriptor as a | 
|  | 1286 | * holding descriptor - software does so by marking | 
|  | 1287 | * it with the STALE flag. | 
|  | 1288 | */ | 
|  | 1289 | bf_held = NULL; | 
|  | 1290 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | 
|  | 1291 | bf_held = bf; | 
|  | 1292 | if (list_is_last(&bf_held->list, &txq->axq_q)) { | 
|  | 1293 | /* FIXME: | 
|  | 1294 | * The holding descriptor is the last | 
|  | 1295 | * descriptor in queue. It's safe to remove | 
|  | 1296 | * the last holding descriptor in BH context. | 
|  | 1297 | */ | 
|  | 1298 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1299 | break; | 
|  | 1300 | } else { | 
|  | 1301 | /* Let's work with the next buffer now */ | 
|  | 1302 | bf = list_entry(bf_held->list.next, | 
|  | 1303 | struct ath_buf, list); | 
|  | 1304 | } | 
|  | 1305 | } | 
|  | 1306 |  | 
|  | 1307 | lastbf = bf->bf_lastbf; | 
|  | 1308 | ds = lastbf->bf_desc;    /* NB: last descriptor */ | 
|  | 1309 |  | 
|  | 1310 | status = ath9k_hw_txprocdesc(ah, ds); | 
|  | 1311 | if (status == -EINPROGRESS) { | 
|  | 1312 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1313 | break; | 
|  | 1314 | } | 
|  | 1315 | if (bf->bf_desc == txq->axq_lastdsWithCTS) | 
|  | 1316 | txq->axq_lastdsWithCTS = NULL; | 
|  | 1317 | if (ds == txq->axq_gatingds) | 
|  | 1318 | txq->axq_gatingds = NULL; | 
|  | 1319 |  | 
|  | 1320 | /* | 
|  | 1321 | * Remove ath_buf's of the same transmit unit from txq, | 
|  | 1322 | * however leave the last descriptor back as the holding | 
|  | 1323 | * descriptor for hw. | 
|  | 1324 | */ | 
|  | 1325 | lastbf->bf_status |= ATH_BUFSTATUS_STALE; | 
|  | 1326 | INIT_LIST_HEAD(&bf_head); | 
|  | 1327 |  | 
|  | 1328 | if (!list_is_singular(&lastbf->list)) | 
|  | 1329 | list_cut_position(&bf_head, | 
|  | 1330 | &txq->axq_q, lastbf->list.prev); | 
|  | 1331 |  | 
|  | 1332 | txq->axq_depth--; | 
|  | 1333 |  | 
|  | 1334 | if (bf->bf_isaggr) | 
|  | 1335 | txq->axq_aggr_depth--; | 
|  | 1336 |  | 
|  | 1337 | txok = (ds->ds_txstat.ts_status == 0); | 
|  | 1338 |  | 
|  | 1339 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1340 |  | 
|  | 1341 | if (bf_held) { | 
|  | 1342 | list_del(&bf_held->list); | 
|  | 1343 | spin_lock_bh(&sc->sc_txbuflock); | 
|  | 1344 | list_add_tail(&bf_held->list, &sc->sc_txbuf); | 
|  | 1345 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 1346 | } | 
|  | 1347 |  | 
|  | 1348 | if (!bf->bf_isampdu) { | 
|  | 1349 | /* | 
|  | 1350 | * This frame is sent out as a single frame. | 
|  | 1351 | * Use hardware retry status for this frame. | 
|  | 1352 | */ | 
|  | 1353 | bf->bf_retries = ds->ds_txstat.ts_longretry; | 
|  | 1354 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) | 
|  | 1355 | bf->bf_isxretried = 1; | 
|  | 1356 | nbad = 0; | 
|  | 1357 | } else { | 
|  | 1358 | nbad = ath_tx_num_badfrms(sc, bf, txok); | 
|  | 1359 | } | 
|  | 1360 | skb = bf->bf_mpdu; | 
|  | 1361 | tx_info = IEEE80211_SKB_CB(skb); | 
|  | 1362 | tx_info_priv = (struct ath_tx_info_priv *) | 
|  | 1363 | tx_info->driver_data[0]; | 
|  | 1364 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) | 
|  | 1365 | tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | 
|  | 1366 | if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && | 
|  | 1367 | (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { | 
|  | 1368 | if (ds->ds_txstat.ts_status == 0) | 
|  | 1369 | nacked++; | 
|  | 1370 |  | 
|  | 1371 | if (bf->bf_isdata) { | 
|  | 1372 | if (isrifs) | 
|  | 1373 | tmp_ds = bf->bf_rifslast->bf_desc; | 
|  | 1374 | else | 
|  | 1375 | tmp_ds = ds; | 
|  | 1376 | memcpy(&tx_info_priv->tx, | 
|  | 1377 | &tmp_ds->ds_txstat, | 
|  | 1378 | sizeof(tx_info_priv->tx)); | 
|  | 1379 | tx_info_priv->n_frames = bf->bf_nframes; | 
|  | 1380 | tx_info_priv->n_bad_frames = nbad; | 
|  | 1381 | } | 
|  | 1382 | } | 
|  | 1383 |  | 
|  | 1384 | /* | 
|  | 1385 | * Complete this transmit unit | 
|  | 1386 | */ | 
|  | 1387 | if (bf->bf_isampdu) | 
|  | 1388 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); | 
|  | 1389 | else | 
|  | 1390 | ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); | 
|  | 1391 |  | 
|  | 1392 | /* Wake up mac80211 queue */ | 
|  | 1393 |  | 
|  | 1394 | spin_lock_bh(&txq->axq_lock); | 
|  | 1395 | if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <= | 
|  | 1396 | (ATH_TXBUF - 20)) { | 
|  | 1397 | int qnum; | 
|  | 1398 | qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); | 
|  | 1399 | if (qnum != -1) { | 
|  | 1400 | ieee80211_wake_queue(sc->hw, qnum); | 
|  | 1401 | txq->stopped = 0; | 
|  | 1402 | } | 
|  | 1403 |  | 
|  | 1404 | } | 
|  | 1405 |  | 
|  | 1406 | /* | 
|  | 1407 | * schedule any pending packets if aggregation is enabled | 
|  | 1408 | */ | 
|  | 1409 | if (sc->sc_txaggr) | 
|  | 1410 | ath_txq_schedule(sc, txq); | 
|  | 1411 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1412 | } | 
|  | 1413 | return nacked; | 
|  | 1414 | } | 
|  | 1415 |  | 
|  | 1416 | static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) | 
|  | 1417 | { | 
|  | 1418 | struct ath_hal *ah = sc->sc_ah; | 
|  | 1419 |  | 
|  | 1420 | (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum); | 
|  | 1421 | DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n", | 
|  | 1422 | __func__, txq->axq_qnum, | 
|  | 1423 | ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link); | 
|  | 1424 | } | 
|  | 1425 |  | 
|  | 1426 | /* Drain only the data queues */ | 
|  | 1427 |  | 
|  | 1428 | static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx) | 
|  | 1429 | { | 
|  | 1430 | struct ath_hal *ah = sc->sc_ah; | 
|  | 1431 | int i; | 
|  | 1432 | int npend = 0; | 
|  | 1433 | enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc); | 
|  | 1434 |  | 
|  | 1435 | /* XXX return value */ | 
|  | 1436 | if (!sc->sc_invalid) { | 
|  | 1437 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 
|  | 1438 | if (ATH_TXQ_SETUP(sc, i)) { | 
|  | 1439 | ath_tx_stopdma(sc, &sc->sc_txq[i]); | 
|  | 1440 |  | 
|  | 1441 | /* The TxDMA may not really be stopped. | 
|  | 1442 | * Double check the hal tx pending count */ | 
|  | 1443 | npend += ath9k_hw_numtxpending(ah, | 
|  | 1444 | sc->sc_txq[i].axq_qnum); | 
|  | 1445 | } | 
|  | 1446 | } | 
|  | 1447 | } | 
|  | 1448 |  | 
|  | 1449 | if (npend) { | 
|  | 1450 | int status; | 
|  | 1451 |  | 
|  | 1452 | /* TxDMA not stopped, reset the hal */ | 
|  | 1453 | DPRINTF(sc, ATH_DBG_XMIT, | 
|  | 1454 | "%s: Unable to stop TxDMA. Reset HAL!\n", __func__); | 
|  | 1455 |  | 
|  | 1456 | spin_lock_bh(&sc->sc_resetlock); | 
|  | 1457 | if (!ath9k_hw_reset(ah, sc->sc_opmode, | 
|  | 1458 | &sc->sc_curchan, ht_macmode, | 
|  | 1459 | sc->sc_tx_chainmask, sc->sc_rx_chainmask, | 
|  | 1460 | sc->sc_ht_extprotspacing, true, &status)) { | 
|  | 1461 |  | 
|  | 1462 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 1463 | "%s: unable to reset hardware; hal status %u\n", | 
|  | 1464 | __func__, | 
|  | 1465 | status); | 
|  | 1466 | } | 
|  | 1467 | spin_unlock_bh(&sc->sc_resetlock); | 
|  | 1468 | } | 
|  | 1469 |  | 
|  | 1470 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 
|  | 1471 | if (ATH_TXQ_SETUP(sc, i)) | 
|  | 1472 | ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx); | 
|  | 1473 | } | 
|  | 1474 | } | 
|  | 1475 |  | 
|  | 1476 | /* Add a sub-frame to block ack window */ | 
|  | 1477 |  | 
|  | 1478 | static void ath_tx_addto_baw(struct ath_softc *sc, | 
|  | 1479 | struct ath_atx_tid *tid, | 
|  | 1480 | struct ath_buf *bf) | 
|  | 1481 | { | 
|  | 1482 | int index, cindex; | 
|  | 1483 |  | 
|  | 1484 | if (bf->bf_isretried) | 
|  | 1485 | return; | 
|  | 1486 |  | 
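|  |  | /* | 
|  |  | * index is the offset of this frame's sequence number from the start | 
|  |  | * of the block-ack window; cindex wraps it into the circular tx_buf | 
|  |  | * slot array so the frame can be looked up again on completion. | 
|  |  | */ | 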
|  | 1487 | index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); | 
|  | 1488 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | 
|  | 1489 |  | 
|  | 1490 | ASSERT(tid->tx_buf[cindex] == NULL); | 
|  | 1491 | tid->tx_buf[cindex] = bf; | 
|  | 1492 |  | 
|  | 1493 | if (index >= ((tid->baw_tail - tid->baw_head) & | 
|  | 1494 | (ATH_TID_MAX_BUFS - 1))) { | 
|  | 1495 | tid->baw_tail = cindex; | 
|  | 1496 | INCR(tid->baw_tail, ATH_TID_MAX_BUFS); | 
|  | 1497 | } | 
|  | 1498 | } | 
|  | 1499 |  | 
|  | 1500 | /* | 
|  | 1501 | * Function to send an A-MPDU | 
|  | 1502 | * NB: must be called with txq lock held | 
|  | 1503 | */ | 
|  | 1504 |  | 
|  | 1505 | static int ath_tx_send_ampdu(struct ath_softc *sc, | 
|  | 1506 | struct ath_txq *txq, | 
|  | 1507 | struct ath_atx_tid *tid, | 
|  | 1508 | struct list_head *bf_head, | 
|  | 1509 | struct ath_tx_control *txctl) | 
|  | 1510 | { | 
|  | 1511 | struct ath_buf *bf; | 
|  | 1512 | struct sk_buff *skb; | 
|  | 1513 | struct ieee80211_tx_info *tx_info; | 
|  | 1514 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 1515 |  | 
|  | 1516 | BUG_ON(list_empty(bf_head)); | 
|  | 1517 |  | 
|  | 1518 | bf = list_first_entry(bf_head, struct ath_buf, list); | 
|  | 1519 | bf->bf_isampdu = 1; | 
|  | 1520 | bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */ | 
|  | 1521 | bf->bf_tidno = txctl->tidno; | 
|  | 1522 |  | 
|  | 1523 | /* | 
|  | 1524 | * Do not queue to h/w when any of the following conditions is true: | 
|  | 1525 | * - there are pending frames in software queue | 
|  | 1526 | * - the TID is currently paused for ADDBA/BAR request | 
|  | 1527 | * - seqno is not within block-ack window | 
|  | 1528 | * - h/w queue depth exceeds low water mark | 
|  | 1529 | */ | 
|  | 1530 | if (!list_empty(&tid->buf_q) || tid->paused || | 
|  | 1531 | !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || | 
|  | 1532 | txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { | 
|  | 1533 | /* | 
|  | 1534 | * Add this frame to the software queue so it can be | 
|  | 1535 | * scheduled for aggregation later. | 
|  | 1536 | */ | 
|  | 1537 | list_splice_tail_init(bf_head, &tid->buf_q); | 
|  | 1538 | ath_tx_queue_tid(txq, tid); | 
|  | 1539 | return 0; | 
|  | 1540 | } | 
|  | 1541 |  | 
|  | 1542 | skb = (struct sk_buff *)bf->bf_mpdu; | 
|  | 1543 | tx_info = IEEE80211_SKB_CB(skb); | 
|  | 1544 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; | 
|  | 1545 | memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); | 
|  | 1546 |  | 
|  | 1547 | /* Add sub-frame to BAW */ | 
|  | 1548 | ath_tx_addto_baw(sc, tid, bf); | 
|  | 1549 |  | 
|  | 1550 | /* Queue to h/w without aggregation */ | 
|  | 1551 | bf->bf_nframes = 1; | 
|  | 1552 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | 
|  | 1553 | ath_buf_set_rate(sc, bf); | 
|  | 1554 | ath_tx_txqaddbuf(sc, txq, bf_head); | 
|  | 1555 | return 0; | 
|  | 1556 | } | 
|  | 1557 |  | 
|  | 1558 | /* | 
|  | 1559 | * Look up the rate series for this buffer and return the aggregate | 
|  | 1560 | * size limit based on the lowest (most restrictive) of the rates. | 
|  | 1561 | */ | 
|  | 1562 |  | 
|  | 1563 | static u32 ath_lookup_rate(struct ath_softc *sc, | 
|  | 1564 | struct ath_buf *bf) | 
|  | 1565 | { | 
|  | 1566 | const struct ath9k_rate_table *rt = sc->sc_currates; | 
|  | 1567 | struct sk_buff *skb; | 
|  | 1568 | struct ieee80211_tx_info *tx_info; | 
|  | 1569 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 1570 | u32 max_4ms_framelen, frame_length; | 
|  | 1571 | u16 aggr_limit, legacy = 0, maxampdu; | 
|  | 1572 | int i; | 
|  | 1573 |  | 
|  | 1574 |  | 
|  | 1575 | skb = (struct sk_buff *)bf->bf_mpdu; | 
|  | 1576 | tx_info = IEEE80211_SKB_CB(skb); | 
|  | 1577 | tx_info_priv = (struct ath_tx_info_priv *) | 
|  | 1578 | tx_info->driver_data[0]; | 
|  | 1579 | memcpy(bf->bf_rcs, | 
|  | 1580 | tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); | 
|  | 1581 |  | 
|  | 1582 | /* | 
|  | 1583 | * Find the lowest of the per-rate maximum frame lengths that fit in a | 
|  | 1584 | * 4 ms transmit duration across the rate series. | 
|  | 1585 | * TODO - TXOP limit needs to be considered. | 
|  | 1586 | */ | 
|  | 1587 | max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; | 
|  | 1588 |  | 
|  | 1589 | for (i = 0; i < 4; i++) { | 
|  | 1590 | if (bf->bf_rcs[i].tries) { | 
|  | 1591 | frame_length = bf->bf_rcs[i].max_4ms_framelen; | 
|  | 1592 |  | 
|  | 1593 | if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) { | 
|  | 1594 | legacy = 1; | 
|  | 1595 | break; | 
|  | 1596 | } | 
|  | 1597 |  | 
|  | 1598 | max_4ms_framelen = min(max_4ms_framelen, frame_length); | 
|  | 1599 | } | 
|  | 1600 | } | 
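|  |  | /* | 
|  |  | * max_4ms_framelen now holds the most restrictive 4 ms byte limit | 
|  |  | * among the rates actually being tried. For example, if the slowest | 
|  |  | * retried rate can move at most 8 KB in 4 ms, the aggregate below is | 
|  |  | * capped at 8 KB (or less, if the default/ampdu limits are smaller). | 
|  |  | */ | 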
|  | 1601 |  | 
|  | 1602 | /* | 
|  | 1603 | * Limit the aggregate size by the minimum (most restrictive) rate if | 
|  | 1604 | * the selected rate is not a probe rate; if the selected rate is a | 
|  | 1605 | * probe rate, avoid aggregating this packet altogether. | 
|  | 1606 | */ | 
|  | 1607 | if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) | 
|  | 1608 | return 0; | 
|  | 1609 |  | 
|  | 1610 | aggr_limit = min(max_4ms_framelen, | 
|  | 1611 | (u32)ATH_AMPDU_LIMIT_DEFAULT); | 
|  | 1612 |  | 
|  | 1613 | /* | 
|  | 1614 | * The h/w can accept aggregates up to 16-bit lengths (65535). | 
|  | 1615 | * The IE, however, can hold up to 65536, which shows up here | 
|  | 1616 | * as zero. Ignore 65536 since we are constrained by the h/w. | 
|  | 1617 | */ | 
|  | 1618 | maxampdu = sc->sc_ht_info.maxampdu; | 
|  | 1619 | if (maxampdu) | 
|  | 1620 | aggr_limit = min(aggr_limit, maxampdu); | 
|  | 1621 |  | 
|  | 1622 | return aggr_limit; | 
|  | 1623 | } | 
|  | 1624 |  | 
|  | 1625 | /* | 
|  | 1626 | * Return the number of delimiters to be added to | 
|  | 1627 | * meet the minimum required MPDU density. | 
|  | 1628 | * The caller must make sure that the rate is an HT rate. | 
|  | 1629 | */ | 
|  | 1630 |  | 
|  | 1631 | static int ath_compute_num_delims(struct ath_softc *sc, | 
|  | 1632 | struct ath_buf *bf, | 
|  | 1633 | u16 frmlen) | 
|  | 1634 | { | 
|  | 1635 | const struct ath9k_rate_table *rt = sc->sc_currates; | 
|  | 1636 | u32 nsymbits, nsymbols, mpdudensity; | 
|  | 1637 | u16 minlen; | 
|  | 1638 | u8 rc, flags, rix; | 
|  | 1639 | int width, half_gi, ndelim, mindelim; | 
|  | 1640 |  | 
|  | 1641 | /* Select standard number of delimiters based on frame length alone */ | 
|  | 1642 | ndelim = ATH_AGGR_GET_NDELIM(frmlen); | 
|  | 1643 |  | 
|  | 1644 | /* | 
|  | 1645 | * If encryption is enabled, the hardware requires some more padding | 
|  | 1646 | * between subframes. | 
|  | 1647 | * TODO - this could be improved to be dependent on the rate. | 
|  | 1648 | *      The hardware can keep up at lower rates, but not at higher rates. | 
|  | 1649 | */ | 
|  | 1650 | if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) | 
|  | 1651 | ndelim += ATH_AGGR_ENCRYPTDELIM; | 
|  | 1652 |  | 
|  | 1653 | /* | 
|  | 1654 | * Convert the desired MPDU density from microseconds to bytes based | 
|  | 1655 | * on the highest rate in the rate series (i.e. the first rate) to | 
|  | 1656 | * determine the required minimum length for a subframe. Take into | 
|  | 1657 | * account whether the highest rate is 20 or 40 MHz and half or full GI. | 
|  | 1658 | */ | 
|  | 1659 | mpdudensity = sc->sc_ht_info.mpdudensity; | 
|  | 1660 |  | 
|  | 1661 | /* | 
|  | 1662 | * If there is no mpdu density restriction, no further calculation | 
|  | 1663 | * is needed. | 
|  | 1664 | */ | 
|  | 1665 | if (mpdudensity == 0) | 
|  | 1666 | return ndelim; | 
|  | 1667 |  | 
|  | 1668 | rix = bf->bf_rcs[0].rix; | 
|  | 1669 | flags = bf->bf_rcs[0].flags; | 
|  | 1670 | rc = rt->info[rix].rateCode; | 
|  | 1671 | width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0; | 
|  | 1672 | half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0; | 
|  | 1673 |  | 
|  | 1674 | if (half_gi) | 
|  | 1675 | nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity); | 
|  | 1676 | else | 
|  | 1677 | nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity); | 
|  | 1678 |  | 
|  | 1679 | if (nsymbols == 0) | 
|  | 1680 | nsymbols = 1; | 
|  | 1681 |  | 
|  | 1682 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; | 
|  | 1683 | minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; | 
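|  |  | /* | 
|  |  | * Illustrative numbers: for an MPDU density of 8 us at full GI, | 
|  |  | * nsymbols = 8 / 4 = 2; at MCS 7 / 40 MHz, nsymbits = 540 (see the | 
|  |  | * bits_per_symbol table above), so minlen = 2 * 540 / 8 = 135 bytes. | 
|  |  | */ | 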
|  | 1684 |  | 
|  | 1685 | /* Is frame shorter than required minimum length? */ | 
|  | 1686 | if (frmlen < minlen) { | 
|  | 1687 | /* Get the minimum number of delimiters required. */ | 
|  | 1688 | mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; | 
|  | 1689 | ndelim = max(mindelim, ndelim); | 
|  | 1690 | } | 
|  | 1691 |  | 
|  | 1692 | return ndelim; | 
|  | 1693 | } | 
|  | 1694 |  | 
|  | 1695 | /* | 
|  | 1696 | * For aggregation from software buffer queue. | 
|  | 1697 | * NB: must be called with txq lock held | 
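|  |  | * Walks tid->buf_q, pulling frames into a single aggregate until the | 
|  |  | * block-ack window closes, the byte limit from ath_lookup_rate() is | 
|  |  | * reached, or the subframe limit is hit; the returned status tells | 
|  |  | * the caller which of these terminated the loop. | 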
|  | 1698 | */ | 
|  | 1699 |  | 
|  | 1700 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | 
|  | 1701 | struct ath_atx_tid *tid, | 
|  | 1702 | struct list_head *bf_q, | 
|  | 1703 | struct ath_buf **bf_last, | 
|  | 1704 | struct aggr_rifs_param *param, | 
|  | 1705 | int *prev_frames) | 
|  | 1706 | { | 
|  | 1707 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) | 
|  | 1708 | struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; | 
|  | 1709 | struct list_head bf_head; | 
|  | 1710 | int rl = 0, nframes = 0, ndelim; | 
|  | 1711 | u16 aggr_limit = 0, al = 0, bpad = 0, | 
|  | 1712 | al_delta, h_baw = tid->baw_size / 2; | 
|  | 1713 | enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; | 
|  | 1714 | int prev_al = 0, is_ds_rate = 0; | 
|  | 1715 | INIT_LIST_HEAD(&bf_head); | 
|  | 1716 |  | 
|  | 1717 | BUG_ON(list_empty(&tid->buf_q)); | 
|  | 1718 |  | 
|  | 1719 | bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); | 
|  | 1720 |  | 
|  | 1721 | do { | 
|  | 1722 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 
|  | 1723 |  | 
|  | 1724 | /* | 
|  | 1725 | * do not step over block-ack window | 
|  | 1726 | */ | 
|  | 1727 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { | 
|  | 1728 | status = ATH_AGGR_BAW_CLOSED; | 
|  | 1729 | break; | 
|  | 1730 | } | 
|  | 1731 |  | 
|  | 1732 | if (!rl) { | 
|  | 1733 | aggr_limit = ath_lookup_rate(sc, bf); | 
|  | 1734 | rl = 1; | 
|  | 1735 | /* | 
|  | 1736 | * Is rate dual stream | 
|  | 1737 | */ | 
|  | 1738 | is_ds_rate = | 
|  | 1739 | (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0; | 
|  | 1740 | } | 
|  | 1741 |  | 
|  | 1742 | /* | 
|  | 1743 | * do not exceed aggregation limit | 
|  | 1744 | */ | 
|  | 1745 | al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; | 
|  | 1746 |  | 
|  | 1747 | if (nframes && (aggr_limit < | 
|  | 1748 | (al + bpad + al_delta + prev_al))) { | 
|  | 1749 | status = ATH_AGGR_LIMITED; | 
|  | 1750 | break; | 
|  | 1751 | } | 
|  | 1752 |  | 
|  | 1753 | /* | 
|  | 1754 | * do not exceed subframe limit | 
|  | 1755 | */ | 
|  | 1756 | if ((nframes + *prev_frames) >= | 
|  | 1757 | min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { | 
|  | 1758 | status = ATH_AGGR_LIMITED; | 
|  | 1759 | break; | 
|  | 1760 | } | 
|  | 1761 |  | 
|  | 1762 | /* | 
|  | 1763 | * add padding for previous frame to aggregation length | 
|  | 1764 | */ | 
|  | 1765 | al += bpad + al_delta; | 
|  | 1766 |  | 
|  | 1767 | /* | 
|  | 1768 | * Get the delimiters needed to meet the MPDU | 
|  | 1769 | * density for this node. | 
|  | 1770 | */ | 
|  | 1771 | ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen); | 
|  | 1772 |  | 
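|  |  | /* | 
|  |  | * bpad is charged to the next subframe: pad this frame out to a | 
|  |  | * 4-byte boundary and place ndelim 4-byte delimiters in front of | 
|  |  | * the following one. | 
|  |  | */ | 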
|  | 1773 | bpad = PADBYTES(al_delta) + (ndelim << 2); | 
|  | 1774 |  | 
|  | 1775 | bf->bf_next = NULL; | 
|  | 1776 | bf->bf_lastfrm->bf_desc->ds_link = 0; | 
|  | 1777 |  | 
|  | 1778 | /* | 
|  | 1779 | * this packet is part of an aggregate | 
|  | 1780 | * - remove all descriptors belonging to this frame from | 
|  | 1781 | *   software queue | 
|  | 1782 | * - add it to block ack window | 
|  | 1783 | * - set up descriptors for aggregation | 
|  | 1784 | */ | 
|  | 1785 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | 
|  | 1786 | ath_tx_addto_baw(sc, tid, bf); | 
|  | 1787 |  | 
|  | 1788 | list_for_each_entry(tbf, &bf_head, list) { | 
|  | 1789 | ath9k_hw_set11n_aggr_middle(sc->sc_ah, | 
|  | 1790 | tbf->bf_desc, ndelim); | 
|  | 1791 | } | 
|  | 1792 |  | 
|  | 1793 | /* | 
|  | 1794 | * link buffers of this frame to the aggregate | 
|  | 1795 | */ | 
|  | 1796 | list_splice_tail_init(&bf_head, bf_q); | 
|  | 1797 | nframes++; | 
|  | 1798 |  | 
|  | 1799 | if (bf_prev) { | 
|  | 1800 | bf_prev->bf_next = bf; | 
|  | 1801 | bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr; | 
|  | 1802 | } | 
|  | 1803 | bf_prev = bf; | 
|  | 1804 |  | 
|  | 1805 | #ifdef AGGR_NOSHORT | 
|  | 1806 | /* | 
|  | 1807 | * terminate aggregation on a small packet boundary | 
|  | 1808 | */ | 
|  | 1809 | if (bf->bf_frmlen < ATH_AGGR_MINPLEN) { | 
|  | 1810 | status = ATH_AGGR_SHORTPKT; | 
|  | 1811 | break; | 
|  | 1812 | } | 
|  | 1813 | #endif | 
|  | 1814 | } while (!list_empty(&tid->buf_q)); | 
|  | 1815 |  | 
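|  |  | /* | 
|  |  | * Record the total aggregate length and subframe count in the first | 
|  |  | * buffer; the caller uses these to program the first descriptor. | 
|  |  | */ | 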
|  | 1816 | bf_first->bf_al = al; | 
|  | 1817 | bf_first->bf_nframes = nframes; | 
|  | 1818 | *bf_last = bf_prev; | 
|  | 1819 | return status; | 
|  | 1820 | #undef PADBYTES | 
|  | 1821 | } | 
|  | 1822 |  | 
|  | 1823 | /* | 
|  | 1824 | * process pending frames possibly doing a-mpdu aggregation | 
|  | 1825 | * NB: must be called with txq lock held | 
|  | 1826 | */ | 
|  | 1827 |  | 
|  | 1828 | static void ath_tx_sched_aggr(struct ath_softc *sc, | 
|  | 1829 | struct ath_txq *txq, struct ath_atx_tid *tid) | 
|  | 1830 | { | 
|  | 1831 | struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; | 
|  | 1832 | enum ATH_AGGR_STATUS status; | 
|  | 1833 | struct list_head bf_q; | 
|  | 1834 | struct aggr_rifs_param param = {0, 0, 0, 0, NULL}; | 
|  | 1835 | int prev_frames = 0; | 
|  | 1836 |  | 
|  | 1837 | do { | 
|  | 1838 | if (list_empty(&tid->buf_q)) | 
|  | 1839 | return; | 
|  | 1840 |  | 
|  | 1841 | INIT_LIST_HEAD(&bf_q); | 
|  | 1842 |  | 
|  | 1843 | status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param, | 
|  | 1844 | &prev_frames); | 
|  | 1845 |  | 
|  | 1846 | /* | 
|  | 1847 | * no frames picked up to be aggregated; block-ack | 
|  | 1848 | * window is not open | 
|  | 1849 | */ | 
|  | 1850 | if (list_empty(&bf_q)) | 
|  | 1851 | break; | 
|  | 1852 |  | 
|  | 1853 | bf = list_first_entry(&bf_q, struct ath_buf, list); | 
|  | 1854 | bf_last = list_entry(bf_q.prev, struct ath_buf, list); | 
|  | 1855 | bf->bf_lastbf = bf_last; | 
|  | 1856 |  | 
|  | 1857 | /* | 
|  | 1858 | * if only one frame, send as non-aggregate | 
|  | 1859 | */ | 
|  | 1860 | if (bf->bf_nframes == 1) { | 
|  | 1861 | ASSERT(bf->bf_lastfrm == bf_last); | 
|  | 1862 |  | 
|  | 1863 | bf->bf_isaggr = 0; | 
|  | 1864 | /* | 
|  | 1865 | * clear aggr bits for every descriptor | 
|  | 1866 | * XXX TODO: is there a way to optimize it? | 
|  | 1867 | */ | 
|  | 1868 | list_for_each_entry(tbf, &bf_q, list) { | 
|  | 1869 | ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc); | 
|  | 1870 | } | 
|  | 1871 |  | 
|  | 1872 | ath_buf_set_rate(sc, bf); | 
|  | 1873 | ath_tx_txqaddbuf(sc, txq, &bf_q); | 
|  | 1874 | continue; | 
|  | 1875 | } | 
|  | 1876 |  | 
|  | 1877 | /* | 
|  | 1878 | * setup first desc with rate and aggr info | 
|  | 1879 | */ | 
|  | 1880 | bf->bf_isaggr  = 1; | 
|  | 1881 | ath_buf_set_rate(sc, bf); | 
|  | 1882 | ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); | 
|  | 1883 |  | 
|  | 1884 | /* | 
|  | 1885 | * anchor last frame of aggregate correctly | 
|  | 1886 | */ | 
|  | 1887 | ASSERT(bf_lastaggr); | 
|  | 1888 | ASSERT(bf_lastaggr->bf_lastfrm == bf_last); | 
|  | 1889 | tbf = bf_lastaggr; | 
|  | 1890 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | 
|  | 1891 |  | 
|  | 1892 | /* XXX: We don't enter into this loop, consider removing this */ | 
|  | 1893 | while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) { | 
|  | 1894 | tbf = list_entry(tbf->list.next, struct ath_buf, list); | 
|  | 1895 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | 
|  | 1896 | } | 
|  | 1897 |  | 
|  | 1898 | txq->axq_aggr_depth++; | 
|  | 1899 |  | 
|  | 1900 | /* | 
|  | 1901 | * Normal aggregate, queue to hardware | 
|  | 1902 | */ | 
|  | 1903 | ath_tx_txqaddbuf(sc, txq, &bf_q); | 
|  | 1904 |  | 
|  | 1905 | } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && | 
|  | 1906 | status != ATH_AGGR_BAW_CLOSED); | 
|  | 1907 | } | 
|  | 1908 |  | 
|  | 1909 | /* Called with txq lock held */ | 
|  | 1910 |  | 
|  | 1911 | static void ath_tid_drain(struct ath_softc *sc, | 
|  | 1912 | struct ath_txq *txq, | 
|  | 1913 | struct ath_atx_tid *tid, | 
|  | 1914 | bool bh_flag) | 
|  | 1915 | { | 
|  | 1916 | struct ath_buf *bf; | 
|  | 1917 | struct list_head bf_head; | 
|  | 1918 | INIT_LIST_HEAD(&bf_head); | 
|  | 1919 |  | 
|  | 1920 | for (;;) { | 
|  | 1921 | if (list_empty(&tid->buf_q)) | 
|  | 1922 | break; | 
|  | 1923 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 
|  | 1924 |  | 
|  | 1925 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | 
|  | 1926 |  | 
|  | 1927 | /* update baw for software retried frame */ | 
|  | 1928 | if (bf->bf_isretried) | 
|  | 1929 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | 
|  | 1930 |  | 
|  | 1931 | /* | 
|  | 1932 | * do not indicate packets while holding txq spinlock. | 
|  | 1933 | * unlock is intentional here | 
|  | 1934 | */ | 
|  | 1935 | if (likely(bh_flag)) | 
|  | 1936 | spin_unlock_bh(&txq->axq_lock); | 
|  | 1937 | else | 
|  | 1938 | spin_unlock(&txq->axq_lock); | 
|  | 1939 |  | 
|  | 1940 | /* complete this sub-frame */ | 
|  | 1941 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 
|  | 1942 |  | 
|  | 1943 | if (likely(bh_flag)) | 
|  | 1944 | spin_lock_bh(&txq->axq_lock); | 
|  | 1945 | else | 
|  | 1946 | spin_lock(&txq->axq_lock); | 
|  | 1947 | } | 
|  | 1948 |  | 
|  | 1949 | /* | 
|  | 1950 | * TODO: For frame(s) that are in the retry state, we will reuse the | 
|  | 1951 | * sequence number(s) without setting the retry bit. The | 
|  | 1952 | * alternative is to give up on these and BAR the receiver's window | 
|  | 1953 | * forward. | 
|  | 1954 | */ | 
|  | 1955 | tid->seq_next = tid->seq_start; | 
|  | 1956 | tid->baw_tail = tid->baw_head; | 
|  | 1957 | } | 
|  | 1958 |  | 
|  | 1959 | /* | 
|  | 1960 | * Drain all pending buffers | 
|  | 1961 | * NB: must be called with txq lock held | 
|  | 1962 | */ | 
|  | 1963 |  | 
|  | 1964 | static void ath_txq_drain_pending_buffers(struct ath_softc *sc, | 
|  | 1965 | struct ath_txq *txq, | 
|  | 1966 | bool bh_flag) | 
|  | 1967 | { | 
|  | 1968 | struct ath_atx_ac *ac, *ac_tmp; | 
|  | 1969 | struct ath_atx_tid *tid, *tid_tmp; | 
|  | 1970 |  | 
|  | 1971 | list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { | 
|  | 1972 | list_del(&ac->list); | 
|  | 1973 | ac->sched = false; | 
|  | 1974 | list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { | 
|  | 1975 | list_del(&tid->list); | 
|  | 1976 | tid->sched = false; | 
|  | 1977 | ath_tid_drain(sc, txq, tid, bh_flag); | 
|  | 1978 | } | 
|  | 1979 | } | 
|  | 1980 | } | 
|  | 1981 |  | 
|  | 1982 | static int ath_tx_start_dma(struct ath_softc *sc, | 
|  | 1983 | struct sk_buff *skb, | 
|  | 1984 | struct scatterlist *sg, | 
|  | 1985 | u32 n_sg, | 
|  | 1986 | struct ath_tx_control *txctl) | 
|  | 1987 | { | 
|  | 1988 | struct ath_node *an = txctl->an; | 
|  | 1989 | struct ath_buf *bf = NULL; | 
|  | 1990 | struct list_head bf_head; | 
|  | 1991 | struct ath_desc *ds; | 
|  | 1992 | struct ath_hal *ah = sc->sc_ah; | 
|  | 1993 | struct ath_txq *txq = &sc->sc_txq[txctl->qnum]; | 
|  | 1994 | struct ath_tx_info_priv *tx_info_priv; | 
|  | 1995 | struct ath_rc_series *rcs; | 
|  | 1996 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 
|  | 1997 | struct ieee80211_tx_info *tx_info =  IEEE80211_SKB_CB(skb); | 
|  | 1998 | __le16 fc = hdr->frame_control; | 
|  | 1999 |  | 
|  | 2000 | /* For each sglist entry, allocate an ath_buf for DMA */ | 
|  | 2001 | INIT_LIST_HEAD(&bf_head); | 
|  | 2002 | spin_lock_bh(&sc->sc_txbuflock); | 
|  | 2003 | if (unlikely(list_empty(&sc->sc_txbuf))) { | 
|  | 2004 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 2005 | return -ENOMEM; | 
|  | 2006 | } | 
|  | 2007 |  | 
|  | 2008 | bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list); | 
|  | 2009 | list_del(&bf->list); | 
|  | 2010 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 2011 |  | 
|  | 2012 | list_add_tail(&bf->list, &bf_head); | 
|  | 2013 |  | 
|  | 2014 | /* set up this buffer */ | 
|  | 2015 | ATH_TXBUF_RESET(bf); | 
|  | 2016 | bf->bf_frmlen = txctl->frmlen; | 
|  | 2017 | bf->bf_isdata = ieee80211_is_data(fc); | 
|  | 2018 | bf->bf_isbar = ieee80211_is_back_req(fc); | 
|  | 2019 | bf->bf_ispspoll = ieee80211_is_pspoll(fc); | 
|  | 2020 | bf->bf_flags = txctl->flags; | 
|  | 2021 | bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT; | 
|  | 2022 | bf->bf_keytype = txctl->keytype; | 
|  | 2023 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; | 
|  | 2024 | rcs = tx_info_priv->rcs; | 
|  | 2025 | bf->bf_rcs[0] = rcs[0]; | 
|  | 2026 | bf->bf_rcs[1] = rcs[1]; | 
|  | 2027 | bf->bf_rcs[2] = rcs[2]; | 
|  | 2028 | bf->bf_rcs[3] = rcs[3]; | 
|  | 2029 | bf->bf_node = an; | 
|  | 2030 | bf->bf_mpdu = skb; | 
|  | 2031 | bf->bf_buf_addr = sg_dma_address(sg); | 
|  | 2032 |  | 
|  | 2033 | /* setup descriptor */ | 
|  | 2034 | ds = bf->bf_desc; | 
|  | 2035 | ds->ds_link = 0; | 
|  | 2036 | ds->ds_data = bf->bf_buf_addr; | 
|  | 2037 |  | 
|  | 2038 | /* | 
|  | 2039 | * Save the DMA context in the first ath_buf | 
|  | 2040 | */ | 
|  | 2041 | copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext), | 
|  | 2042 | get_dma_mem_context(txctl, dmacontext)); | 
|  | 2043 |  | 
|  | 2044 | /* | 
|  | 2045 | * Formulate first tx descriptor with tx controls. | 
|  | 2046 | */ | 
|  | 2047 | ath9k_hw_set11n_txdesc(ah, | 
|  | 2048 | ds, | 
|  | 2049 | bf->bf_frmlen, /* frame length */ | 
|  | 2050 | txctl->atype, /* Atheros packet type */ | 
|  | 2051 | min(txctl->txpower, (u16)60), /* txpower */ | 
|  | 2052 | txctl->keyix,            /* key cache index */ | 
|  | 2053 | txctl->keytype,          /* key type */ | 
|  | 2054 | txctl->flags);           /* flags */ | 
|  | 2055 | ath9k_hw_filltxdesc(ah, | 
|  | 2056 | ds, | 
|  | 2057 | sg_dma_len(sg),     /* segment length */ | 
|  | 2058 | true,            /* first segment */ | 
|  | 2059 | (n_sg == 1) ? true : false, /* last segment */ | 
|  | 2060 | ds);                /* first descriptor */ | 
|  | 2061 |  | 
|  | 2062 | bf->bf_lastfrm = bf; | 
|  | 2063 | bf->bf_ht = txctl->ht; | 
|  | 2064 |  | 
|  | 2065 | spin_lock_bh(&txq->axq_lock); | 
|  | 2066 |  | 
|  | 2067 | if (txctl->ht && sc->sc_txaggr) { | 
|  | 2068 | struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); | 
|  | 2069 | if (ath_aggr_query(sc, an, txctl->tidno)) { | 
|  | 2070 | /* | 
|  | 2071 | * Try aggregation if it's a unicast data frame | 
|  | 2072 | * and the destination is HT capable. | 
|  | 2073 | */ | 
|  | 2074 | ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl); | 
|  | 2075 | } else { | 
|  | 2076 | /* | 
|  | 2077 | * Send this frame as regular when ADDBA exchange | 
|  | 2078 | * is neither complete nor pending. | 
|  | 2079 | */ | 
|  | 2080 | ath_tx_send_normal(sc, txq, tid, &bf_head); | 
|  | 2081 | } | 
|  | 2082 | } else { | 
|  | 2083 | bf->bf_lastbf = bf; | 
|  | 2084 | bf->bf_nframes = 1; | 
|  | 2085 | ath_buf_set_rate(sc, bf); | 
|  | 2086 |  | 
|  | 2087 | if (ieee80211_is_back_req(fc)) { | 
|  | 2088 | /* This is required for resuming tid | 
|  | 2089 | * during BAR completion */ | 
|  | 2090 | bf->bf_tidno = txctl->tidno; | 
|  | 2091 | } | 
|  | 2092 |  | 
|  | 2093 | if (is_multicast_ether_addr(hdr->addr1)) { | 
|  | 2094 | struct ath_vap *avp = sc->sc_vaps[txctl->if_id]; | 
|  | 2095 |  | 
|  | 2096 | /* | 
|  | 2097 | * When servicing one or more stations in power-save | 
|  | 2098 | * mode, or if there is some mcast data waiting on the | 
|  | 2099 | * mcast queue (to prevent out-of-order delivery of | 
|  | 2100 | * mcast/bcast packets), multicast frames must be | 
|  | 2101 | * buffered until after the beacon. We use the private | 
|  | 2102 | * mcast queue for that. | 
|  | 2103 | */ | 
|  | 2104 | /* XXX? more bit in 802.11 frame header */ | 
|  | 2105 | spin_lock_bh(&avp->av_mcastq.axq_lock); | 
|  | 2106 | if (txctl->ps || avp->av_mcastq.axq_depth) | 
|  | 2107 | ath_tx_mcastqaddbuf(sc, | 
|  | 2108 | &avp->av_mcastq, &bf_head); | 
|  | 2109 | else | 
|  | 2110 | ath_tx_txqaddbuf(sc, txq, &bf_head); | 
|  | 2111 | spin_unlock_bh(&avp->av_mcastq.axq_lock); | 
|  | 2112 | } else | 
|  | 2113 | ath_tx_txqaddbuf(sc, txq, &bf_head); | 
|  | 2114 | } | 
|  | 2115 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2116 | return 0; | 
|  | 2117 | } | 
|  | 2118 |  | 
|  | 2119 | static void xmit_map_sg(struct ath_softc *sc, | 
|  | 2120 | struct sk_buff *skb, | 
|  | 2121 | dma_addr_t *pa, | 
|  | 2122 | struct ath_tx_control *txctl) | 
|  | 2123 | { | 
|  | 2124 | struct ath_xmit_status tx_status; | 
|  | 2125 | struct ath_atx_tid *tid; | 
|  | 2126 | struct scatterlist sg; | 
|  | 2127 |  | 
|  | 2128 | *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 
|  | 2129 |  | 
|  | 2130 | /* setup S/G list */ | 
|  | 2131 | memset(&sg, 0, sizeof(struct scatterlist)); | 
|  | 2132 | sg_dma_address(&sg) = *pa; | 
|  | 2133 | sg_dma_len(&sg) = skb->len; | 
|  | 2134 |  | 
|  | 2135 | if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) { | 
|  | 2136 | /* | 
|  | 2137 | * We have to drop the frame here. | 
|  | 2138 | */ | 
|  | 2139 | pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE); | 
|  | 2140 |  | 
|  | 2141 | tx_status.retries = 0; | 
|  | 2142 | tx_status.flags = ATH_TX_ERROR; | 
|  | 2143 |  | 
|  | 2144 | if (txctl->ht && sc->sc_txaggr) { | 
|  | 2145 | /* Reclaim the seqno. */ | 
|  | 2146 | tid = ATH_AN_2_TID((struct ath_node *) | 
|  | 2147 | txctl->an, txctl->tidno); | 
|  | 2148 | DECR(tid->seq_next, IEEE80211_SEQ_MAX); | 
|  | 2149 | } | 
|  | 2150 | ath_tx_complete(sc, skb, &tx_status, txctl->an); | 
|  | 2151 | } | 
|  | 2152 | } | 
|  | 2153 |  | 
|  | 2154 | /* Initialize TX queue and h/w */ | 
|  | 2155 |  | 
|  | 2156 | int ath_tx_init(struct ath_softc *sc, int nbufs) | 
|  | 2157 | { | 
|  | 2158 | int error = 0; | 
|  | 2159 |  | 
|  | 2160 | do { | 
|  | 2161 | spin_lock_init(&sc->sc_txbuflock); | 
|  | 2162 |  | 
|  | 2163 | /* Setup tx descriptors */ | 
|  | 2164 | error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, | 
|  | 2165 | "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC); | 
|  | 2166 | if (error != 0) { | 
|  | 2167 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2168 | "%s: failed to allocate tx descriptors: %d\n", | 
|  | 2169 | __func__, error); | 
|  | 2170 | break; | 
|  | 2171 | } | 
|  | 2172 |  | 
|  | 2173 | /* XXX allocate beacon state together with vap */ | 
|  | 2174 | error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, | 
|  | 2175 | "beacon", ATH_BCBUF, 1); | 
|  | 2176 | if (error != 0) { | 
|  | 2177 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2178 | "%s: failed to allocate " | 
|  | 2179 | "beacon descripotrs: %d\n", | 
|  | 2180 | __func__, error); | 
|  | 2181 | break; | 
|  | 2182 | } | 
|  | 2183 |  | 
|  | 2184 | } while (0); | 
|  | 2185 |  | 
|  | 2186 | if (error != 0) | 
|  | 2187 | ath_tx_cleanup(sc); | 
|  | 2188 |  | 
|  | 2189 | return error; | 
|  | 2190 | } | 
|  | 2191 |  | 
|  | 2192 | /* Reclaim all tx queue resources */ | 
|  | 2193 |  | 
|  | 2194 | int ath_tx_cleanup(struct ath_softc *sc) | 
|  | 2195 | { | 
|  | 2196 | /* cleanup beacon descriptors */ | 
|  | 2197 | if (sc->sc_bdma.dd_desc_len != 0) | 
|  | 2198 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); | 
|  | 2199 |  | 
|  | 2200 | /* cleanup tx descriptors */ | 
|  | 2201 | if (sc->sc_txdma.dd_desc_len != 0) | 
|  | 2202 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); | 
|  | 2203 |  | 
|  | 2204 | return 0; | 
|  | 2205 | } | 
|  | 2206 |  | 
|  | 2207 | /* Setup a h/w transmit queue */ | 
|  | 2208 |  | 
|  | 2209 | struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | 
|  | 2210 | { | 
|  | 2211 | struct ath_hal *ah = sc->sc_ah; | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2212 | struct ath9k_tx_queue_info qi; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2213 | int qnum; | 
|  | 2214 |  | 
|  | 2215 | memzero(&qi, sizeof(qi)); | 
|  | 2216 | qi.tqi_subtype = subtype; | 
|  | 2217 | qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; | 
|  | 2218 | qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; | 
|  | 2219 | qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2220 | qi.tqi_physCompBuf = 0; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2221 |  | 
|  | 2222 | /* | 
|  | 2223 | * Enable interrupts only for EOL and DESC conditions. | 
|  | 2224 | * We mark tx descriptors to receive a DESC interrupt | 
|  | 2225 | * when a tx queue gets deep; otherwise waiting for the | 
|  | 2226 | * EOL to reap descriptors.  Note that this is done to | 
|  | 2227 | * reduce interrupt load and this only defers reaping | 
|  | 2228 | * descriptors, never transmitting frames.  Aside from | 
|  | 2229 | * reducing interrupts this also permits more concurrency. | 
|  | 2230 | * The only potential downside is if the tx queue backs | 
|  | 2231 | * up, in which case the top half of the kernel may back up | 
|  | 2232 | * due to a lack of tx descriptors. | 
|  | 2233 | * | 
|  | 2234 | * The UAPSD queue is an exception, since we take a desc- | 
|  | 2235 | * based intr on the EOSP frames. | 
|  | 2236 | */ | 
|  | 2237 | if (qtype == ATH9K_TX_QUEUE_UAPSD) | 
|  | 2238 | qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; | 
|  | 2239 | else | 
|  | 2240 | qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | | 
|  | 2241 | TXQ_FLAG_TXDESCINT_ENABLE; | 
|  | 2242 | qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); | 
|  | 2243 | if (qnum == -1) { | 
|  | 2244 | /* | 
|  | 2245 | * NB: don't print a message, this happens | 
|  | 2246 | * normally on parts with too few tx queues | 
|  | 2247 | */ | 
|  | 2248 | return NULL; | 
|  | 2249 | } | 
|  | 2250 | if (qnum >= ARRAY_SIZE(sc->sc_txq)) { | 
|  | 2251 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2252 | "%s: hal qnum %u out of range, max %u!\n", | 
|  | 2253 | __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)); | 
|  | 2254 | ath9k_hw_releasetxqueue(ah, qnum); | 
|  | 2255 | return NULL; | 
|  | 2256 | } | 
|  | 2257 | if (!ATH_TXQ_SETUP(sc, qnum)) { | 
|  | 2258 | struct ath_txq *txq = &sc->sc_txq[qnum]; | 
|  | 2259 |  | 
|  | 2260 | txq->axq_qnum = qnum; | 
|  | 2261 | txq->axq_link = NULL; | 
|  | 2262 | INIT_LIST_HEAD(&txq->axq_q); | 
|  | 2263 | INIT_LIST_HEAD(&txq->axq_acq); | 
|  | 2264 | spin_lock_init(&txq->axq_lock); | 
|  | 2265 | txq->axq_depth = 0; | 
|  | 2266 | txq->axq_aggr_depth = 0; | 
|  | 2267 | txq->axq_totalqueued = 0; | 
|  | 2268 | txq->axq_intrcnt = 0; | 
|  | 2269 | txq->axq_linkbuf = NULL; | 
|  | 2270 | sc->sc_txqsetup |= 1<<qnum; | 
|  | 2271 | } | 
|  | 2272 | return &sc->sc_txq[qnum]; | 
|  | 2273 | } | 
|  | 2274 |  | 
|  | 2275 | /* Reclaim resources for a setup queue */ | 
|  | 2276 |  | 
|  | 2277 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) | 
|  | 2278 | { | 
|  | 2279 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); | 
|  | 2280 | sc->sc_txqsetup &= ~(1<<txq->axq_qnum); | 
|  | 2281 | } | 
|  | 2282 |  | 
|  | 2283 | /* | 
|  | 2284 | * Setup a hardware data transmit queue for the specified | 
|  | 2285 | * access category.  The hal may not support all requested | 
|  | 2286 | * queues, in which case it will return a reference to a | 
|  | 2287 | * previously setup queue.  We record the mapping from ac's | 
|  | 2288 | * to h/w queues for use by ath_tx_start and also track | 
|  | 2289 | * the set of h/w queues being used to optimize work in the | 
|  | 2290 | * transmit interrupt handler and related routines. | 
|  | 2291 | */ | 
|  | 2292 |  | 
|  | 2293 | int ath_tx_setup(struct ath_softc *sc, int haltype) | 
|  | 2294 | { | 
|  | 2295 | struct ath_txq *txq; | 
|  | 2296 |  | 
|  | 2297 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | 
|  | 2298 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2299 | "%s: HAL AC %u out of range, max %zu!\n", | 
|  | 2300 | __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q)); | 
|  | 2301 | return 0; | 
|  | 2302 | } | 
|  | 2303 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); | 
|  | 2304 | if (txq != NULL) { | 
|  | 2305 | sc->sc_haltype2q[haltype] = txq->axq_qnum; | 
|  | 2306 | return 1; | 
|  | 2307 | } else | 
|  | 2308 | return 0; | 
|  | 2309 | } | 
|  | 2310 |  | 
|  | 2311 | int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) | 
|  | 2312 | { | 
|  | 2313 | int qnum; | 
|  | 2314 |  | 
|  | 2315 | switch (qtype) { | 
|  | 2316 | case ATH9K_TX_QUEUE_DATA: | 
|  | 2317 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | 
|  | 2318 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2319 | "%s: HAL AC %u out of range, max %zu!\n", | 
|  | 2320 | __func__, | 
|  | 2321 | haltype, ARRAY_SIZE(sc->sc_haltype2q)); | 
|  | 2322 | return -1; | 
|  | 2323 | } | 
|  | 2324 | qnum = sc->sc_haltype2q[haltype]; | 
|  | 2325 | break; | 
|  | 2326 | case ATH9K_TX_QUEUE_BEACON: | 
|  | 2327 | qnum = sc->sc_bhalq; | 
|  | 2328 | break; | 
|  | 2329 | case ATH9K_TX_QUEUE_CAB: | 
|  | 2330 | qnum = sc->sc_cabq->axq_qnum; | 
|  | 2331 | break; | 
|  | 2332 | default: | 
|  | 2333 | qnum = -1; | 
|  | 2334 | } | 
|  | 2335 | return qnum; | 
|  | 2336 | } | 
|  | 2337 |  | 
|  | 2338 | /* Update parameters for a transmit queue */ | 
|  | 2339 |  | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2340 | int ath_txq_update(struct ath_softc *sc, int qnum, | 
|  | 2341 | struct ath9k_tx_queue_info *qinfo) | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2342 | { | 
|  | 2343 | struct ath_hal *ah = sc->sc_ah; | 
|  | 2344 | int error = 0; | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2345 | struct ath9k_tx_queue_info qi; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2346 |  | 
|  | 2347 | if (qnum == sc->sc_bhalq) { | 
|  | 2348 | /* | 
|  | 2349 | * XXX: for beacon queue, we just save the parameter. | 
|  | 2350 | * It will be picked up by ath_beaconq_config when | 
|  | 2351 | * it's necessary. | 
|  | 2352 | */ | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2353 | sc->sc_beacon_qi = *qinfo; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2354 | return 0; | 
|  | 2355 | } | 
|  | 2356 |  | 
|  | 2357 | ASSERT(sc->sc_txq[qnum].axq_qnum == qnum); | 
|  | 2358 |  | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2359 | ath9k_hw_get_txq_props(ah, qnum, &qi); | 
|  | 2360 | qi.tqi_aifs = qinfo->tqi_aifs; | 
|  | 2361 | qi.tqi_cwmin = qinfo->tqi_cwmin; | 
|  | 2362 | qi.tqi_cwmax = qinfo->tqi_cwmax; | 
|  | 2363 | qi.tqi_burstTime = qinfo->tqi_burstTime; | 
|  | 2364 | qi.tqi_readyTime = qinfo->tqi_readyTime; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2365 |  | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2366 | if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2367 | DPRINTF(sc, ATH_DBG_FATAL, | 
|  | 2368 | "%s: unable to update hardware queue %u!\n", | 
|  | 2369 | __func__, qnum); | 
|  | 2370 | error = -EIO; | 
|  | 2371 | } else { | 
|  | 2372 | ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ | 
|  | 2373 | } | 
|  | 2374 |  | 
|  | 2375 | return error; | 
|  | 2376 | } | 
|  | 2377 |  | 
|  | 2378 | int ath_cabq_update(struct ath_softc *sc) | 
|  | 2379 | { | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2380 | struct ath9k_tx_queue_info qi; | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2381 | int qnum = sc->sc_cabq->axq_qnum; | 
|  | 2382 | struct ath_beacon_config conf; | 
|  | 2383 |  | 
| Sujith | ea9880f | 2008-08-07 10:53:10 +0530 | [diff] [blame] | 2384 | ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); | 
| Luis R. Rodriguez | f078f20 | 2008-08-04 00:16:41 -0700 | [diff] [blame] | 2385 | /* | 
|  | 2386 | * Ensure the readytime % is within the bounds. | 
|  | 2387 | */ | 
|  | 2388 | if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) | 
|  | 2389 | sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; | 
|  | 2390 | else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) | 
|  | 2391 | sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; | 
|  | 2392 |  | 
|  | 2393 | ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf); | 
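|  |  | /* cabqReadytime is a percentage of the beacon interval */ | 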
|  | 2394 | qi.tqi_readyTime = | 
|  | 2395 | (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100; | 
|  | 2396 | ath_txq_update(sc, qnum, &qi); | 
|  | 2397 |  | 
|  | 2398 | return 0; | 
|  | 2399 | } | 
|  | 2400 |  | 
|  | 2401 | int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb) | 
|  | 2402 | { | 
|  | 2403 | struct ath_tx_control txctl; | 
|  | 2404 | int error = 0; | 
|  | 2405 |  | 
|  | 2406 | error = ath_tx_prepare(sc, skb, &txctl); | 
|  | 2407 | if (error == 0) | 
|  | 2408 | /* | 
|  | 2409 | * Start DMA mapping. | 
|  | 2410 | * ath_tx_start_dma() will be called either synchronously | 
|  | 2411 | * or asynchronously once DMA is complete. | 
|  | 2412 | */ | 
|  | 2413 | xmit_map_sg(sc, skb, | 
|  | 2414 | get_dma_mem_context(&txctl, dmacontext), | 
|  | 2415 | &txctl); | 
|  | 2416 | else | 
|  | 2417 | ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE); | 
|  | 2418 |  | 
|  | 2419 | /* failed packets will be dropped by the caller */ | 
|  | 2420 | return error; | 
|  | 2421 | } | 
|  | 2422 |  | 
|  | 2423 | /* Deferred processing of transmit interrupt */ | 
|  | 2424 |  | 
|  | 2425 | void ath_tx_tasklet(struct ath_softc *sc) | 
|  | 2426 | { | 
|  | 2427 | u64 tsf = ath9k_hw_gettsf64(sc->sc_ah); | 
|  | 2428 | int i, nacked = 0; | 
|  | 2429 | u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); | 
|  | 2430 |  | 
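|  |  | /* Fetch the bitmap of queues that raised a tx interrupt; only those | 
|  |  |  * are processed below. */ | 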
|  | 2431 | ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); | 
|  | 2432 |  | 
|  | 2433 | /* | 
|  | 2434 | * Process each active queue. | 
|  | 2435 | */ | 
|  | 2436 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 
|  | 2437 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) | 
|  | 2438 | nacked += ath_tx_processq(sc, &sc->sc_txq[i]); | 
|  | 2439 | } | 
|  | 2440 | if (nacked) | 
|  | 2441 | sc->sc_lastrx = tsf; | 
|  | 2442 | } | 
|  | 2443 |  | 
|  | 2444 | void ath_tx_draintxq(struct ath_softc *sc, | 
|  | 2445 | struct ath_txq *txq, bool retry_tx) | 
|  | 2446 | { | 
|  | 2447 | struct ath_buf *bf, *lastbf; | 
|  | 2448 | struct list_head bf_head; | 
|  | 2449 |  | 
|  | 2450 | INIT_LIST_HEAD(&bf_head); | 
|  | 2451 |  | 
|  | 2452 | /* | 
|  | 2453 | * NB: this assumes output has been stopped and | 
|  | 2454 | *     we do not need to block ath_tx_tasklet | 
|  | 2455 | */ | 
|  | 2456 | for (;;) { | 
|  | 2457 | spin_lock_bh(&txq->axq_lock); | 
|  | 2458 |  | 
|  | 2459 | if (list_empty(&txq->axq_q)) { | 
|  | 2460 | txq->axq_link = NULL; | 
|  | 2461 | txq->axq_linkbuf = NULL; | 
|  | 2462 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2463 | break; | 
|  | 2464 | } | 
|  | 2465 |  | 
|  | 2466 | bf = list_first_entry(&txq->axq_q, struct ath_buf, list); | 
|  | 2467 |  | 
|  | 2468 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | 
|  | 2469 | list_del(&bf->list); | 
|  | 2470 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2471 |  | 
|  | 2472 | spin_lock_bh(&sc->sc_txbuflock); | 
|  | 2473 | list_add_tail(&bf->list, &sc->sc_txbuf); | 
|  | 2474 | spin_unlock_bh(&sc->sc_txbuflock); | 
|  | 2475 | continue; | 
|  | 2476 | } | 
|  | 2477 |  | 
|  | 2478 | lastbf = bf->bf_lastbf; | 
|  | 2479 | if (!retry_tx) | 
|  | 2480 | lastbf->bf_desc->ds_txstat.ts_flags = | 
|  | 2481 | ATH9K_TX_SW_ABORTED; | 
|  | 2482 |  | 
|  | 2483 | /* remove ath_buf's of the same mpdu from txq */ | 
|  | 2484 | list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); | 
|  | 2485 | txq->axq_depth--; | 
|  | 2486 |  | 
|  | 2487 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2488 |  | 
|  | 2489 | if (bf->bf_isampdu) | 
|  | 2490 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); | 
|  | 2491 | else | 
|  | 2492 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 
|  | 2493 | } | 
|  | 2494 |  | 
|  | 2495 | /* flush any pending frames if aggregation is enabled */ | 
|  | 2496 | if (sc->sc_txaggr) { | 
|  | 2497 | if (!retry_tx) { | 
|  | 2498 | spin_lock_bh(&txq->axq_lock); | 
|  | 2499 | ath_txq_drain_pending_buffers(sc, txq, | 
|  | 2500 | ATH9K_BH_STATUS_CHANGE); | 
|  | 2501 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2502 | } | 
|  | 2503 | } | 
|  | 2504 | } | 
|  | 2505 |  | 
|  | 2506 | /* Drain the transmit queues and reclaim resources */ | 
|  | 2507 |  | 
|  | 2508 | void ath_draintxq(struct ath_softc *sc, bool retry_tx) | 
|  | 2509 | { | 
|  | 2510 | /* stop beacon queue. The beacon will be freed when | 
|  | 2511 | * we go to INIT state */ | 
|  | 2512 | if (!sc->sc_invalid) { | 
|  | 2513 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); | 
|  | 2514 | DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, | 
|  | 2515 | ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); | 
|  | 2516 | } | 
|  | 2517 |  | 
|  | 2518 | ath_drain_txdataq(sc, retry_tx); | 
|  | 2519 | } | 
|  | 2520 |  | 
|  | 2521 | u32 ath_txq_depth(struct ath_softc *sc, int qnum) | 
|  | 2522 | { | 
|  | 2523 | return sc->sc_txq[qnum].axq_depth; | 
|  | 2524 | } | 
|  | 2525 |  | 
|  | 2526 | u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) | 
|  | 2527 | { | 
|  | 2528 | return sc->sc_txq[qnum].axq_aggr_depth; | 
|  | 2529 | } | 
|  | 2530 |  | 
|  | 2531 | /* Check if an ADDBA is required. A valid node must be passed. */ | 
|  | 2532 | enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc, | 
|  | 2533 | struct ath_node *an, | 
|  | 2534 | u8 tidno) | 
|  | 2535 | { | 
|  | 2536 | struct ath_atx_tid *txtid; | 
|  | 2537 | DECLARE_MAC_BUF(mac); | 
|  | 2538 |  | 
|  | 2539 | if (!sc->sc_txaggr) | 
|  | 2540 | return AGGR_NOT_REQUIRED; | 
|  | 2541 |  | 
|  | 2542 | /* ADDBA exchange must be completed before sending aggregates */ | 
|  | 2543 | txtid = ATH_AN_2_TID(an, tidno); | 
|  | 2544 |  | 
|  | 2545 | if (txtid->addba_exchangecomplete) | 
|  | 2546 | return AGGR_EXCHANGE_DONE; | 
|  | 2547 |  | 
|  | 2548 | if (txtid->cleanup_inprogress) | 
|  | 2549 | return AGGR_CLEANUP_PROGRESS; | 
|  | 2550 |  | 
|  | 2551 | if (txtid->addba_exchangeinprogress) | 
|  | 2552 | return AGGR_EXCHANGE_PROGRESS; | 
|  | 2553 |  | 
|  | 2554 | if (!txtid->addba_exchangecomplete) { | 
|  | 2555 | if (!txtid->addba_exchangeinprogress && | 
|  | 2556 | (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { | 
|  | 2557 | txtid->addba_exchangeattempts++; | 
|  | 2558 | return AGGR_REQUIRED; | 
|  | 2559 | } | 
|  | 2560 | } | 
|  | 2561 |  | 
|  | 2562 | return AGGR_NOT_REQUIRED; | 
|  | 2563 | } | 
|  | 2564 |  | 
|  | 2565 | /* Start TX aggregation */ | 
|  | 2566 |  | 
|  | 2567 | int ath_tx_aggr_start(struct ath_softc *sc, | 
|  | 2568 | const u8 *addr, | 
|  | 2569 | u16 tid, | 
|  | 2570 | u16 *ssn) | 
|  | 2571 | { | 
|  | 2572 | struct ath_atx_tid *txtid; | 
|  | 2573 | struct ath_node *an; | 
|  | 2574 |  | 
|  | 2575 | spin_lock_bh(&sc->node_lock); | 
|  | 2576 | an = ath_node_find(sc, (u8 *) addr); | 
|  | 2577 | spin_unlock_bh(&sc->node_lock); | 
|  | 2578 |  | 
|  | 2579 | if (!an) { | 
|  | 2580 | DPRINTF(sc, ATH_DBG_AGGR, | 
|  | 2581 | "%s: Node not found to initialize " | 
|  | 2582 | "TX aggregation\n", __func__); | 
|  | 2583 | return -1; | 
|  | 2584 | } | 
|  | 2585 |  | 
|  | 2586 | if (sc->sc_txaggr) { | 
|  | 2587 | txtid = ATH_AN_2_TID(an, tid); | 
|  | 2588 | txtid->addba_exchangeinprogress = 1; | 
|  | 2589 | ath_tx_pause_tid(sc, txtid); | 
|  | 2590 | } | 
|  | 2591 |  | 
|  | 2592 | return 0; | 
|  | 2593 | } | 
|  | 2594 |  | 
|  | 2595 | /* Stop tx aggregation */ | 
|  | 2596 |  | 
|  | 2597 | int ath_tx_aggr_stop(struct ath_softc *sc, | 
|  | 2598 | const u8 *addr, | 
|  | 2599 | u16 tid) | 
|  | 2600 | { | 
|  | 2601 | struct ath_node *an; | 
|  | 2602 |  | 
|  | 2603 | spin_lock_bh(&sc->node_lock); | 
|  | 2604 | an = ath_node_find(sc, (u8 *) addr); | 
|  | 2605 | spin_unlock_bh(&sc->node_lock); | 
|  | 2606 |  | 
|  | 2607 | if (!an) { | 
|  | 2608 | DPRINTF(sc, ATH_DBG_AGGR, | 
|  | 2609 | "%s: TX aggr stop for non-existent node\n", __func__); | 
|  | 2610 | return -1; | 
|  | 2611 | } | 
|  | 2612 |  | 
|  | 2613 | ath_tx_aggr_teardown(sc, an, tid); | 
|  | 2614 | return 0; | 
|  | 2615 | } | 
|  | 2616 |  | 
|  | 2617 | /* | 
|  | 2618 | * Performs transmit side cleanup when TID changes from aggregated to | 
|  | 2619 | * unaggregated. | 
|  | 2620 | * - Pause the TID and mark cleanup in progress | 
|  | 2621 | * - Discard all retry frames from the s/w queue. | 
|  | 2622 | */ | 
|  | 2623 |  | 
|  | 2624 | void ath_tx_aggr_teardown(struct ath_softc *sc, | 
|  | 2625 | struct ath_node *an, u8 tid) | 
|  | 2626 | { | 
|  | 2627 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); | 
|  | 2628 | struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum]; | 
|  | 2629 | struct ath_buf *bf; | 
|  | 2630 | struct list_head bf_head; | 
|  | 2631 | INIT_LIST_HEAD(&bf_head); | 
|  | 2632 |  | 
|  | 2633 | DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__); | 
|  | 2634 |  | 
|  | 2635 | if (txtid->cleanup_inprogress) /* cleanup is in progress */ | 
|  | 2636 | return; | 
|  | 2637 |  | 
|  | 2638 | if (!txtid->addba_exchangecomplete) { | 
|  | 2639 | txtid->addba_exchangeattempts = 0; | 
|  | 2640 | return; | 
|  | 2641 | } | 
|  | 2642 |  | 
|  | 2643 | /* TID must be paused first */ | 
|  | 2644 | ath_tx_pause_tid(sc, txtid); | 
|  | 2645 |  | 
|  | 2646 | /* drop all software retried frames and mark this TID */ | 
|  | 2647 | spin_lock_bh(&txq->axq_lock); | 
|  | 2648 | while (!list_empty(&txtid->buf_q)) { | 
|  | 2649 | bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); | 
|  | 2650 | if (!bf->bf_isretried) { | 
|  | 2651 | /* | 
|  | 2652 | * NB: this is based on the assumption that | 
|  | 2653 | * software-retried frames will always stay | 
|  | 2654 | * at the head of the software queue. | 
|  | 2655 | */ | 
|  | 2656 | break; | 
|  | 2657 | } | 
|  | 2658 | list_cut_position(&bf_head, | 
|  | 2659 | &txtid->buf_q, &bf->bf_lastfrm->list); | 
|  | 2660 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); | 
|  | 2661 |  | 
|  | 2662 | /* complete this sub-frame */ | 
|  | 2663 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 
|  | 2664 | } | 
|  | 2665 |  | 
|  | 2666 | if (txtid->baw_head != txtid->baw_tail) { | 
|  | 2667 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2668 | txtid->cleanup_inprogress = true; | 
|  | 2669 | } else { | 
|  | 2670 | txtid->addba_exchangecomplete = 0; | 
|  | 2671 | txtid->addba_exchangeattempts = 0; | 
|  | 2672 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2673 | ath_tx_flush_tid(sc, txtid); | 
|  | 2674 | } | 
|  | 2675 | } | 
|  | 2676 |  | 
|  | 2677 | /* | 
|  | 2678 | * Tx scheduling logic | 
|  | 2679 | * NB: must be called with txq lock held | 
|  | 2680 | */ | 
|  | 2681 |  | 
|  | 2682 | void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) | 
|  | 2683 | { | 
|  | 2684 | struct ath_atx_ac *ac; | 
|  | 2685 | struct ath_atx_tid *tid; | 
|  | 2686 |  | 
|  | 2687 | /* nothing to schedule */ | 
|  | 2688 | if (list_empty(&txq->axq_acq)) | 
|  | 2689 | return; | 
|  | 2690 | /* | 
|  | 2691 | * get the first node/ac pair on the queue | 
|  | 2692 | */ | 
|  | 2693 | ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); | 
|  | 2694 | list_del(&ac->list); | 
|  | 2695 | ac->sched = false; | 
|  | 2696 |  | 
|  | 2697 | /* | 
|  | 2698 | * process a single tid per destination | 
|  | 2699 | */ | 
|  | 2700 | do { | 
|  | 2701 | /* nothing to schedule */ | 
|  | 2702 | if (list_empty(&ac->tid_q)) | 
|  | 2703 | return; | 
|  | 2704 |  | 
|  | 2705 | tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); | 
|  | 2706 | list_del(&tid->list); | 
|  | 2707 | tid->sched = false; | 
|  | 2708 |  | 
|  | 2709 | if (tid->paused)    /* check next tid to keep h/w busy */ | 
|  | 2710 | continue; | 
|  | 2711 |  | 
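|  |  | /* | 
|  |  | * For a peer in dynamic SM power save, aggregates are only scheduled | 
|  |  | * when the h/w queue depth is even, presumably to limit how much the | 
|  |  | * single-chain peer has in flight at once. | 
|  |  | */ | 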
|  | 2712 | if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) || | 
|  | 2713 | ((txq->axq_depth % 2) == 0)) { | 
|  | 2714 | ath_tx_sched_aggr(sc, txq, tid); | 
|  | 2715 | } | 
|  | 2716 |  | 
|  | 2717 | /* | 
|  | 2718 | * add tid to round-robin queue if more frames | 
|  | 2719 | * are pending for the tid | 
|  | 2720 | */ | 
|  | 2721 | if (!list_empty(&tid->buf_q)) | 
|  | 2722 | ath_tx_queue_tid(txq, tid); | 
|  | 2723 |  | 
|  | 2724 | /* only schedule one TID at a time */ | 
|  | 2725 | break; | 
|  | 2726 | } while (!list_empty(&ac->tid_q)); | 
|  | 2727 |  | 
|  | 2728 | /* | 
|  | 2729 | * schedule AC if more TIDs need processing | 
|  | 2730 | */ | 
|  | 2731 | if (!list_empty(&ac->tid_q)) { | 
|  | 2732 | /* | 
|  | 2733 | * add dest ac to txq if not already added | 
|  | 2734 | */ | 
|  | 2735 | if (!ac->sched) { | 
|  | 2736 | ac->sched = true; | 
|  | 2737 | list_add_tail(&ac->list, &txq->axq_acq); | 
|  | 2738 | } | 
|  | 2739 | } | 
|  | 2740 | } | 
|  | 2741 |  | 
|  | 2742 | /* Initialize per-node transmit state */ | 
|  | 2743 |  | 
|  | 2744 | void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | 
|  | 2745 | { | 
|  | 2746 | if (sc->sc_txaggr) { | 
|  | 2747 | struct ath_atx_tid *tid; | 
|  | 2748 | struct ath_atx_ac *ac; | 
|  | 2749 | int tidno, acno; | 
|  | 2750 |  | 
|  | 2751 | sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT; | 
|  | 2752 |  | 
|  | 2753 | /* | 
|  | 2754 | * Init per tid tx state | 
|  | 2755 | */ | 
|  | 2756 | for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; | 
|  | 2757 | tidno < WME_NUM_TID; | 
|  | 2758 | tidno++, tid++) { | 
|  | 2759 | tid->an        = an; | 
|  | 2760 | tid->tidno     = tidno; | 
|  | 2761 | tid->seq_start = tid->seq_next = 0; | 
|  | 2762 | tid->baw_size  = WME_MAX_BA; | 
|  | 2763 | tid->baw_head  = tid->baw_tail = 0; | 
|  | 2764 | tid->sched     = false; | 
|  | 2765 | tid->paused = false; | 
|  | 2766 | tid->cleanup_inprogress = false; | 
|  | 2767 | INIT_LIST_HEAD(&tid->buf_q); | 
|  | 2768 |  | 
|  | 2769 | acno = TID_TO_WME_AC(tidno); | 
|  | 2770 | tid->ac = &an->an_aggr.tx.ac[acno]; | 
|  | 2771 |  | 
|  | 2772 | /* ADDBA state */ | 
|  | 2773 | tid->addba_exchangecomplete     = 0; | 
|  | 2774 | tid->addba_exchangeinprogress   = 0; | 
|  | 2775 | tid->addba_exchangeattempts     = 0; | 
|  | 2776 | } | 
|  | 2777 |  | 
|  | 2778 | /* | 
|  | 2779 | * Init per ac tx state | 
|  | 2780 | */ | 
|  | 2781 | for (acno = 0, ac = &an->an_aggr.tx.ac[acno]; | 
|  | 2782 | acno < WME_NUM_AC; acno++, ac++) { | 
|  | 2783 | ac->sched    = false; | 
|  | 2784 | INIT_LIST_HEAD(&ac->tid_q); | 
|  | 2785 |  | 
|  | 2786 | switch (acno) { | 
|  | 2787 | case WME_AC_BE: | 
|  | 2788 | ac->qnum = ath_tx_get_qnum(sc, | 
|  | 2789 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); | 
|  | 2790 | break; | 
|  | 2791 | case WME_AC_BK: | 
|  | 2792 | ac->qnum = ath_tx_get_qnum(sc, | 
|  | 2793 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); | 
|  | 2794 | break; | 
|  | 2795 | case WME_AC_VI: | 
|  | 2796 | ac->qnum = ath_tx_get_qnum(sc, | 
|  | 2797 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); | 
|  | 2798 | break; | 
|  | 2799 | case WME_AC_VO: | 
|  | 2800 | ac->qnum = ath_tx_get_qnum(sc, | 
|  | 2801 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); | 
|  | 2802 | break; | 
|  | 2803 | } | 
|  | 2804 | } | 
|  | 2805 | } | 
|  | 2806 | } | 
|  | 2807 |  | 
|  | 2808 | /* Clean up the pending buffers for the node. */ | 
|  | 2809 |  | 
|  | 2810 | void ath_tx_node_cleanup(struct ath_softc *sc, | 
|  | 2811 | struct ath_node *an, bool bh_flag) | 
|  | 2812 | { | 
|  | 2813 | int i; | 
|  | 2814 | struct ath_atx_ac *ac, *ac_tmp; | 
|  | 2815 | struct ath_atx_tid *tid, *tid_tmp; | 
|  | 2816 | struct ath_txq *txq; | 
|  | 2817 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 
|  | 2818 | if (ATH_TXQ_SETUP(sc, i)) { | 
|  | 2819 | txq = &sc->sc_txq[i]; | 
|  | 2820 |  | 
|  | 2821 | if (likely(bh_flag)) | 
|  | 2822 | spin_lock_bh(&txq->axq_lock); | 
|  | 2823 | else | 
|  | 2824 | spin_lock(&txq->axq_lock); | 
|  | 2825 |  | 
|  | 2826 | list_for_each_entry_safe(ac, | 
|  | 2827 | ac_tmp, &txq->axq_acq, list) { | 
|  | 2828 | tid = list_first_entry(&ac->tid_q, | 
|  | 2829 | struct ath_atx_tid, list); | 
|  | 2830 | if (tid && tid->an != an) | 
|  | 2831 | continue; | 
|  | 2832 | list_del(&ac->list); | 
|  | 2833 | ac->sched = false; | 
|  | 2834 |  | 
|  | 2835 | list_for_each_entry_safe(tid, | 
|  | 2836 | tid_tmp, &ac->tid_q, list) { | 
|  | 2837 | list_del(&tid->list); | 
|  | 2838 | tid->sched = false; | 
|  | 2839 | ath_tid_drain(sc, txq, tid, bh_flag); | 
|  | 2840 | tid->addba_exchangecomplete = 0; | 
|  | 2841 | tid->addba_exchangeattempts = 0; | 
|  | 2842 | tid->cleanup_inprogress = false; | 
|  | 2843 | } | 
|  | 2844 | } | 
|  | 2845 |  | 
|  | 2846 | if (likely(bh_flag)) | 
|  | 2847 | spin_unlock_bh(&txq->axq_lock); | 
|  | 2848 | else | 
|  | 2849 | spin_unlock(&txq->axq_lock); | 
|  | 2850 | } | 
|  | 2851 | } | 
|  | 2852 | } | 
|  | 2853 |  | 
|  | 2854 | /* Cleanup per node transmit state */ | 
|  | 2855 |  | 
|  | 2856 | void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an) | 
|  | 2857 | { | 
|  | 2858 | if (sc->sc_txaggr) { | 
|  | 2859 | struct ath_atx_tid *tid; | 
|  | 2860 | int tidno, i; | 
|  | 2861 |  | 
|  | 2862 | /* Verify that all per-tid tx buffers have been freed */ | 
|  | 2863 | for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; | 
|  | 2864 | tidno < WME_NUM_TID; | 
|  | 2865 | tidno++, tid++) { | 
|  | 2866 |  | 
|  | 2867 | for (i = 0; i < ATH_TID_MAX_BUFS; i++) | 
|  | 2868 | ASSERT(tid->tx_buf[i] == NULL); | 
|  | 2869 | } | 
|  | 2870 | } | 
|  | 2871 | } |