1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_sm.h"
20#include "bfi.h"
21
22/**
23 * IB
24 */
25#define bna_ib_find_free_ibidx(_mask, _pos)\
26do {\
27 (_pos) = 0;\
28 while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
29 ((1 << (_pos)) & (_mask)))\
30 (_pos)++;\
31} while (0)
32
33#define bna_ib_count_ibidx(_mask, _count)\
34do {\
35 int pos = 0;\
36 (_count) = 0;\
37 while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
38 if ((1 << pos) & (_mask))\
39 (_count) = pos + 1;\
40 pos++;\
41 } \
42} while (0)
43
44#define bna_ib_select_segpool(_count, _q_idx)\
45do {\
46 int i;\
47 (_q_idx) = -1;\
48 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
49 if ((_count <= ibidx_pool[i].pool_entry_size)) {\
50 (_q_idx) = i;\
51 break;\
52 } \
53 } \
54} while (0)
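/*
 * Illustrative sketch, not driver code: the three macros above scan a
 * per-IB index bitmask. Written as plain functions (user-space C, an
 * 8-bit segment size assumed for brevity), they behave as shown below;
 * bna_ib_select_segpool() then returns the first pool whose
 * pool_entry_size can hold the resulting count.
 */
#if 0	/* example only -- never built */
#define DEMO_SEGSIZE	8	/* stands in for BFI_IBIDX_MAX_SEGSIZE */

/* First clear bit, i.e. first free index; returns DEMO_SEGSIZE when full */
static int demo_find_free_ibidx(unsigned int mask)
{
	int pos = 0;

	while ((pos < DEMO_SEGSIZE) && ((1 << pos) & mask))
		pos++;
	return pos;
}

/* Highest set bit + 1, i.e. how many index slots the mask spans */
static int demo_count_ibidx(unsigned int mask)
{
	int pos, count = 0;

	for (pos = 0; pos < DEMO_SEGSIZE; pos++)
		if ((1 << pos) & mask)
			count = pos + 1;
	return count;
}

/* e.g. mask 0x0b (bits 0, 1, 3 in use): free idx == 2, count == 4 */
#endif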
55
56struct bna_ibidx_pool {
57 int pool_size;
58 int pool_entry_size;
59};
60init_ibidx_pool(ibidx_pool);
61
62static struct bna_intr *
63bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
64 int vector)
65{
66 struct bna_intr *intr;
67 struct list_head *qe;
68
69 list_for_each(qe, &ib_mod->intr_active_q) {
70 intr = (struct bna_intr *)qe;
71
72 if ((intr->intr_type == intr_type) &&
73 (intr->vector == vector)) {
74 intr->ref_count++;
75 return intr;
76 }
77 }
78
79 if (list_empty(&ib_mod->intr_free_q))
80 return NULL;
81
82 bfa_q_deq(&ib_mod->intr_free_q, &intr);
83 bfa_q_qe_init(&intr->qe);
84
85 intr->ref_count = 1;
86 intr->intr_type = intr_type;
87 intr->vector = vector;
88
89 list_add_tail(&intr->qe, &ib_mod->intr_active_q);
90
91 return intr;
92}
93
94static void
95bna_intr_put(struct bna_ib_mod *ib_mod,
96 struct bna_intr *intr)
97{
98 intr->ref_count--;
99
100 if (intr->ref_count == 0) {
101 intr->ib = NULL;
102 list_del(&intr->qe);
103 bfa_q_qe_init(&intr->qe);
104 list_add_tail(&intr->qe, &ib_mod->intr_free_q);
105 }
106}
107
108void
109bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
110 struct bna_res_info *res_info)
111{
112 int i;
113 int j;
114 int count;
115 u8 offset;
116 struct bna_doorbell_qset *qset;
117 unsigned long off;
118
119 ib_mod->bna = bna;
120
121 ib_mod->ib = (struct bna_ib *)
122 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
123 ib_mod->intr = (struct bna_intr *)
124 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
125 ib_mod->idx_seg = (struct bna_ibidx_seg *)
126 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
127
128 INIT_LIST_HEAD(&ib_mod->ib_free_q);
129 INIT_LIST_HEAD(&ib_mod->intr_free_q);
130 INIT_LIST_HEAD(&ib_mod->intr_active_q);
131
132 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
133 INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
134
135 for (i = 0; i < BFI_MAX_IB; i++) {
136 ib_mod->ib[i].ib_id = i;
137
138 ib_mod->ib[i].ib_seg_host_addr_kva =
139 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
140 ib_mod->ib[i].ib_seg_host_addr.lsb =
141 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
142 ib_mod->ib[i].ib_seg_host_addr.msb =
143 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
144
145 qset = (struct bna_doorbell_qset *)0;
146 off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
147 * (0x20 >> 2)]);
148 ib_mod->ib[i].door_bell.doorbell_addr = off +
149 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
150
151 bfa_q_qe_init(&ib_mod->ib[i].qe);
152 list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
153
154 bfa_q_qe_init(&ib_mod->intr[i].qe);
155 list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
156 }
157
158 count = 0;
159 offset = 0;
160 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
161 for (j = 0; j < ibidx_pool[i].pool_size; j++) {
162 bfa_q_qe_init(&ib_mod->idx_seg[count]);
163 ib_mod->idx_seg[count].ib_seg_size =
164 ibidx_pool[i].pool_entry_size;
165 ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
166 list_add_tail(&ib_mod->idx_seg[count].qe,
167 &ib_mod->ibidx_seg_pool[i]);
168 count++;
169 offset += ibidx_pool[i].pool_entry_size;
170 }
171 }
172}
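/*
 * Sketch (user-space C, hypothetical struct): the doorbell address above is
 * derived by indexing a (struct bna_doorbell_qset *)0 base, i.e. a
 * hand-rolled offsetof(). The same idiom recurs throughout this file
 * (ib_mem, rss_mem, rx_fndb_ram, rit_mem, q_mem). Spelled out:
 */
#if 0	/* example only -- never built */
#include <stddef.h>

struct demo_qset {			/* stands in for bna_doorbell_qset */
	unsigned int ib0[16];
	unsigned int txq[16];
};

static unsigned long demo_ib_doorbell_off(int i)
{
	struct demo_qset *qset = (struct demo_qset *)0;

	/* same shape as the driver's: qset[i >> 1].ib0[(i & 1) * (0x20 >> 2)] */
	return (unsigned long)&qset[i >> 1].ib0[(i & 1) * (0x20 >> 2)];
}

static unsigned long demo_ib_doorbell_off2(int i)
{
	/* explicit offsetof() form -- yields the same byte offset */
	return (i >> 1) * sizeof(struct demo_qset) +
	       offsetof(struct demo_qset, ib0) +
	       (i & 1) * (0x20 >> 2) * sizeof(unsigned int);
}
#endif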
173
174void
175bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
176{
177 int i;
178 int j;
179 struct list_head *qe;
180
181 i = 0;
182 list_for_each(qe, &ib_mod->ib_free_q)
183 i++;
184
185 i = 0;
186 list_for_each(qe, &ib_mod->intr_free_q)
187 i++;
188
189 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
190 j = 0;
191 list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
192 j++;
193 }
194
195 ib_mod->bna = NULL;
196}
197
198static struct bna_ib *
199bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type,
201 int vector)
202{
203 struct bna_ib *ib;
204 struct bna_intr *intr;
205
206 if (intr_type == BNA_INTR_T_INTX)
207 vector = (1 << vector);
208
209 intr = bna_intr_get(ib_mod, intr_type, vector);
210 if (intr == NULL)
211 return NULL;
212
213 if (intr->ib) {
214 if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
215 bna_intr_put(ib_mod, intr);
216 return NULL;
217 }
218 intr->ib->ref_count++;
219 return intr->ib;
220 }
221
222 if (list_empty(&ib_mod->ib_free_q)) {
223 bna_intr_put(ib_mod, intr);
224 return NULL;
225 }
226
227 bfa_q_deq(&ib_mod->ib_free_q, &ib);
228 bfa_q_qe_init(&ib->qe);
229
230 ib->ref_count = 1;
231 ib->start_count = 0;
232 ib->idx_mask = 0;
233
234 ib->intr = intr;
235 ib->idx_seg = NULL;
236 intr->ib = ib;
237
238 ib->bna = ib_mod->bna;
239
240 return ib;
241}
242
243static void
244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
245{
246 bna_intr_put(ib_mod, ib->intr);
247
248 ib->ref_count--;
249
250 if (ib->ref_count == 0) {
251 ib->intr = NULL;
252 ib->bna = NULL;
253 list_add_tail(&ib->qe, &ib_mod->ib_free_q);
254 }
255}
256
257/* Returns index offset - starting from 0 */
258static int
259bna_ib_reserve_idx(struct bna_ib *ib)
260{
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
262 struct bna_ibidx_seg *idx_seg;
263 int idx;
264 int num_idx;
265 int q_idx;
266
267 /* Find the first free index position */
268 bna_ib_find_free_ibidx(ib->idx_mask, idx);
269 if (idx == BFI_IBIDX_MAX_SEGSIZE)
270 return -1;
271
272 /*
273 * Calculate the total number of indexes held by this IB,
274 * including the index newly reserved above.
275 */
276 bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
277
278 /* See if there is a free space in the index segment held by this IB */
279 if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
280 ib->idx_mask |= (1 << idx);
281 return idx;
282 }
283
284 if (ib->start_count)
285 return -1;
286
287 /* Allocate a new segment */
288 bna_ib_select_segpool(num_idx, q_idx);
289 while (1) {
290 if (q_idx == BFI_IBIDX_TOTAL_POOLS)
291 return -1;
292 if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
293 break;
294 q_idx++;
295 }
296 bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
297 bfa_q_qe_init(&idx_seg->qe);
298
299 /* Free the old segment */
300 if (ib->idx_seg) {
301 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
302 list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
303 }
304
305 ib->idx_seg = idx_seg;
306
307 ib->idx_mask |= (1 << idx);
308
309 return idx;
310}
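/*
 * Worked example (segment sizes hypothetical): suppose this IB holds
 * idx_mask 0x3 in a 2-entry segment and a third index is reserved. num_idx
 * becomes 3, which no longer fits, so a segment is pulled from the smallest
 * pool whose entry size is >= 3 (falling back to larger pools if that one
 * is empty), the old 2-entry segment is returned to its pool, and bit 2 is
 * set in idx_mask.
 */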
311
312static void
313bna_ib_release_idx(struct bna_ib *ib, int idx)
314{
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
316 struct bna_ibidx_seg *idx_seg;
317 int num_idx;
318 int cur_q_idx;
319 int new_q_idx;
320
321 ib->idx_mask &= ~(1 << idx);
322
323 if (ib->start_count)
324 return;
325
326 bna_ib_count_ibidx(ib->idx_mask, num_idx);
327
328 /*
329 * Free the segment, if there are no more indexes in the segment
330 * held by this IB
331 */
332 if (!num_idx) {
333 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
334 list_add_tail(&ib->idx_seg->qe,
335 &ib_mod->ibidx_seg_pool[cur_q_idx]);
336 ib->idx_seg = NULL;
337 return;
338 }
339
340 /* See if we can move to a smaller segment */
341 bna_ib_select_segpool(num_idx, new_q_idx);
342 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
343 while (new_q_idx < cur_q_idx) {
344 if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
345 break;
346 new_q_idx++;
347 }
348 if (new_q_idx < cur_q_idx) {
349 /* Select the new smaller segment */
350 bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
351 bfa_q_qe_init(&idx_seg->qe);
352 /* Free the old segment */
353 list_add_tail(&ib->idx_seg->qe,
354 &ib_mod->ibidx_seg_pool[cur_q_idx]);
355 ib->idx_seg = idx_seg;
356 }
357}
358
359static int
360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
361{
362 if (ib->start_count)
363 return -1;
364
365 ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
366 ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
367 ib->ib_config.interpkt_count = ib_config->interpkt_count;
368 ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
369
370 ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
371 if (ib->intr->intr_type == BNA_INTR_T_MSIX)
372 ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
373
374 return 0;
375}
376
377static void
378bna_ib_start(struct bna_ib *ib)
379{
380 struct bna_ib_blk_mem ib_cfg;
381 struct bna_ib_blk_mem *ib_mem;
382 u32 pg_num;
383 u32 intx_mask;
384 int i;
385 void __iomem *base_addr;
386 unsigned long off;
387
388 ib->start_count++;
389
390 if (ib->start_count > 1)
391 return;
392
393 ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
394 ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
395
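	/*
	 * Field packing, as the shifts below lay it out:
	 *   clsc_n_ctrl_n_msix:     coalescing timeo [31:16] | ctrl flags [15:8] | MSI-X vector [7:0]
	 *   ipkt_n_ent_n_idxof:     inter-pkt timeo  [19:16] | seg size   [15:8] | idx tbl offset [7:0]
	 *   ipkt_cnt_cfg_n_unacked: inter-pkt count  [31:24]
	 */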
396 ib_cfg.clsc_n_ctrl_n_msix = (((u32)
397 ib->ib_config.coalescing_timeo << 16) |
398 ((u32)ib->ib_config.ctrl_flags << 8) |
399 (ib->intr->vector));
400 ib_cfg.ipkt_n_ent_n_idxof =
401 ((u32)
402 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
403 ((u32)ib->idx_seg->ib_seg_size << 8) |
404 (ib->idx_seg->ib_idx_tbl_offset);
405 ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
406 ib->ib_config.interpkt_count << 24);
407
408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
409 HQM_IB_RAM_BASE_OFFSET);
410 writel(pg_num, ib->bna->regs.page_addr);
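	/*
	 * Register access here goes through a paged window: the page-number
	 * write above selects the HQM IB RAM block, and the writel()s below
	 * land at offsets relative to that window's base address.
	 */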
411
412 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
413 HQM_IB_RAM_BASE_OFFSET);
414
415 ib_mem = (struct bna_ib_blk_mem *)0;
416 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
417 writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
418
419 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
420 writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
421
422 off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
423 writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
424
425 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
426 writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
427
428 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
429 writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
430
431 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
432 (u32)ib->ib_config.coalescing_timeo, 0);
433
434 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
435 HQM_INDX_TBL_RAM_BASE_OFFSET);
436 writel(pg_num, ib->bna->regs.page_addr);
437
438 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
439 HQM_INDX_TBL_RAM_BASE_OFFSET);
440 for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
441 off = (unsigned long)
442 ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
443 writel(0, base_addr + off);
444 }
445
446 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
447 bna_intx_disable(ib->bna, intx_mask);
448 intx_mask &= ~(ib->intr->vector);
449 bna_intx_enable(ib->bna, intx_mask);
450 }
451}
452
453static void
454bna_ib_stop(struct bna_ib *ib)
455{
456 u32 intx_mask;
457
458 ib->start_count--;
459
460 if (ib->start_count == 0) {
461 writel(BNA_DOORBELL_IB_INT_DISABLE,
462 ib->door_bell.doorbell_addr);
463 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
464 bna_intx_disable(ib->bna, intx_mask);
465 intx_mask |= (ib->intr->vector);
466 bna_intx_enable(ib->bna, intx_mask);
467 }
468 }
469}
470
471static void
472bna_ib_fail(struct bna_ib *ib)
473{
474 ib->start_count = 0;
475}
476
477/**
478 * RXF
479 */
480static void rxf_enable(struct bna_rxf *rxf);
481static void rxf_disable(struct bna_rxf *rxf);
482static void __rxf_config_set(struct bna_rxf *rxf);
483static void __rxf_rit_set(struct bna_rxf *rxf);
484static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
485static int rxf_process_packet_filter(struct bna_rxf *rxf);
486static int rxf_clear_packet_filter(struct bna_rxf *rxf);
487static void rxf_reset_packet_filter(struct bna_rxf *rxf);
488static void rxf_cb_enabled(void *arg, int status);
489static void rxf_cb_disabled(void *arg, int status);
490static void bna_rxf_cb_stats_cleared(void *arg, int status);
491static void __rxf_enable(struct bna_rxf *rxf);
492static void __rxf_disable(struct bna_rxf *rxf);
493
494bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
495 enum bna_rxf_event);
496bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
497 enum bna_rxf_event);
498bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
499 enum bna_rxf_event);
500bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
501 enum bna_rxf_event);
502bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
503 enum bna_rxf_event);
504bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
505 enum bna_rxf_event);
506bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
507 enum bna_rxf_event);
508bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
509 enum bna_rxf_event);
510bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
511 enum bna_rxf_event);
512
513static struct bfa_sm_table rxf_sm_table[] = {
514 {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
515 {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
516 {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
517 {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
518 {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
519 {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
520 {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
521 {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
522 {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
523};
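/*
 * Sketch (generic, not the actual bfa_sm implementation): lookups such as
 * bfa_sm_to_state(rxf_sm_table, rxf->fsm) map the current state-handler
 * function pointer back to its enum by scanning a table like the one above.
 */
#if 0	/* example only -- never built */
typedef void (*demo_sm_t)(void *obj, int event);

struct demo_sm_table {
	demo_sm_t	sm;		/* state handler */
	int		state;		/* matching enum value */
};

static int demo_sm_to_state(const struct demo_sm_table *tbl, int entries,
			    demo_sm_t cur)
{
	int i;

	for (i = 0; i < entries; i++)
		if (tbl[i].sm == cur)
			return tbl[i].state;
	return -1;			/* handler not in the table */
}
#endif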
524
525static void
526bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
527{
528 call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
529}
530
531static void
532bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
533{
534 switch (event) {
535 case RXF_E_START:
536 bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
537 break;
538
539 case RXF_E_STOP:
540 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
541 break;
542
543 case RXF_E_FAIL:
544 /* No-op */
545 break;
546
547 case RXF_E_CAM_FLTR_MOD:
548 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
549 break;
550
551 case RXF_E_STARTED:
552 case RXF_E_STOPPED:
553 case RXF_E_CAM_FLTR_RESP:
554 /**
555 * These events are received due to flushing of mbox
556 * when device fails
557 */
558 /* No-op */
559 break;
560
561 case RXF_E_PAUSE:
562 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
563 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
564 break;
565
566 case RXF_E_RESUME:
567 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
568 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
569 break;
570
571 default:
572 bfa_sm_fault(rxf->rx->bna, event);
573 }
574}
575
576static void
577bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
578{
579 __rxf_config_set(rxf);
580 __rxf_rit_set(rxf);
581 rxf_enable(rxf);
582}
583
584static void
585bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
586{
587 switch (event) {
588 case RXF_E_STOP:
589 /**
590		 * STOP originates from bnad. When this happens,
591		 * the FSM cannot be waiting for a filter update
592 */
593 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
594 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
595 break;
596
597 case RXF_E_FAIL:
598 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
599 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
600 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
601 break;
602
603 case RXF_E_CAM_FLTR_MOD:
604 /* No-op */
605 break;
606
607 case RXF_E_STARTED:
608 /**
609 * Force rxf_process_filter() to go through initial
610 * config
611 */
612 if ((rxf->ucast_active_mac != NULL) &&
613 (rxf->ucast_pending_set == 0))
614 rxf->ucast_pending_set = 1;
615
616 if (rxf->rss_status == BNA_STATUS_T_ENABLED)
617 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
618
619 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
620
621 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
622 break;
623
624 case RXF_E_PAUSE:
625 case RXF_E_RESUME:
626 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
627 break;
628
629 default:
630 bfa_sm_fault(rxf->rx->bna, event);
631 }
632}
633
634static void
635bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
636{
637 if (!rxf_process_packet_filter(rxf)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
640 }
641}
642
643static void
644bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
645{
646 switch (event) {
647 case RXF_E_STOP:
648 /**
649		 * STOP originates from bnad. When this happens,
650		 * the FSM cannot be waiting for a filter update
651 */
652 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
653 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
654 break;
655
656 case RXF_E_FAIL:
657 rxf_reset_packet_filter(rxf);
658 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
659 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
660 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
661 break;
662
663 case RXF_E_CAM_FLTR_MOD:
664 /* No-op */
665 break;
666
667 case RXF_E_CAM_FLTR_RESP:
668 if (!rxf_process_packet_filter(rxf)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
671 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
672 }
673 break;
674
675 case RXF_E_PAUSE:
676 case RXF_E_RESUME:
677 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
678 break;
679
680 default:
681 bfa_sm_fault(rxf->rx->bna, event);
682 }
683}
684
685static void
686bna_rxf_sm_started_entry(struct bna_rxf *rxf)
687{
688 call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
689
690 if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
691 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
692 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
693 else
694 bfa_fsm_send_event(rxf, RXF_E_RESUME);
695 }
696
697}
698
699static void
700bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
701{
702 switch (event) {
703 case RXF_E_STOP:
704 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
705 /* Hack to get FSM start clearing CAM entries */
706 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
707 break;
708
709 case RXF_E_FAIL:
710 rxf_reset_packet_filter(rxf);
711 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
712 break;
713
714 case RXF_E_CAM_FLTR_MOD:
715 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
716 break;
717
718 case RXF_E_PAUSE:
719 bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
720 break;
721
722 case RXF_E_RESUME:
723 bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
724 break;
725
726 default:
727 bfa_sm_fault(rxf->rx->bna, event);
728 }
729}
730
731static void
732bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
733{
734 /**
735 * Note: Do not add rxf_clear_packet_filter here.
736 * It will overstep mbox when this transition happens:
737 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
738 */
739}
740
741static void
742bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
743{
744 switch (event) {
745 case RXF_E_FAIL:
746 /**
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
750 */
751 rxf_reset_packet_filter(rxf);
752 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
753 break;
754
755 case RXF_E_CAM_FLTR_RESP:
756 if (!rxf_clear_packet_filter(rxf)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
759 rxf_disable(rxf);
760 }
761 break;
762
763 default:
764 bfa_sm_fault(rxf->rx->bna, event);
765 }
766}
767
768static void
769bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
770{
771 /**
772 * NOTE: Do not add rxf_disable here.
773 * It will overstep mbox when this transition happens:
774 * start_wait -> stop_wait on RXF_E_STOP event
775 */
776}
777
778static void
779bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
780{
781 switch (event) {
782 case RXF_E_FAIL:
783 /**
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
787 */
788 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
789 break;
790
791 case RXF_E_STARTED:
792 /**
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
795 * RXF_E_STOP event
796 */
797 rxf_disable(rxf);
798 break;
799
800 case RXF_E_STOPPED:
801 /**
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
805 */
806 bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
807 break;
808
809 case RXF_E_PAUSE:
810 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
811 break;
812
813 case RXF_E_RESUME:
814 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
815 break;
816
817 default:
818 bfa_sm_fault(rxf->rx->bna, event);
819 }
820}
821
822static void
823bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
824{
825 rxf->rxf_flags &=
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
827 __rxf_disable(rxf);
828}
829
830static void
831bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
832{
833 switch (event) {
834 case RXF_E_FAIL:
835 /**
836 * FSM was in the process of disabling rxf, initiated by
837 * bnad.
838 */
839 call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
840 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
841 break;
842
843 case RXF_E_STOPPED:
844 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
845 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
846 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
847 break;
848
849 /*
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
852 */
853 default:
854 bfa_sm_fault(rxf->rx->bna, event);
855 }
856}
857
858static void
859bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
860{
861 rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
862 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
863 __rxf_enable(rxf);
864}
865
866static void
867bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
868{
869 switch (event) {
870 case RXF_E_FAIL:
871 /**
872 * FSM was in the process of disabling rxf, initiated by
873 * bnad.
874 */
875 call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
876 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
877 break;
878
879 case RXF_E_STARTED:
880 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
881 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
882 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
883 break;
884
885 /*
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
888 */
889 default:
890 bfa_sm_fault(rxf->rx->bna, event);
891 }
892}
893
894static void
895bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
896{
897 __bna_rxf_stat_clr(rxf);
898}
899
900static void
901bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
902{
903 switch (event) {
904 case RXF_E_FAIL:
905 case RXF_E_STAT_CLEARED:
906 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
907 break;
908
909 default:
910 bfa_sm_fault(rxf->rx->bna, event);
911 }
912}
913
914static void
915__rxf_enable(struct bna_rxf *rxf)
916{
917 struct bfi_ll_rxf_multi_req ll_req;
918 u32 bm[2] = {0, 0};
919
920 if (rxf->rxf_id < 32)
921 bm[0] = 1 << rxf->rxf_id;
922 else
923 bm[1] = 1 << (rxf->rxf_id - 32);
924
925 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
926 ll_req.rxf_id_mask[0] = htonl(bm[0]);
927 ll_req.rxf_id_mask[1] = htonl(bm[1]);
928 ll_req.enable = 1;
929
930 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
931 rxf_cb_enabled, rxf);
932
933 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
934}
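/*
 * Sketch: the bm[] split above (and in __rxf_disable()/__bna_rxf_stat_clr()
 * below) is simply a 64-entry bitmap kept as two u32 words -- rxf ids 0-31
 * select a bit in word 0, ids 32-63 a bit in word 1.
 */
#if 0	/* example only -- never built */
static void demo_set_id_bit(u32 bm[2], int id)
{
	bm[id / 32] |= 1u << (id % 32);
}
#endif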
935
936static void
937__rxf_disable(struct bna_rxf *rxf)
938{
939 struct bfi_ll_rxf_multi_req ll_req;
940 u32 bm[2] = {0, 0};
941
942 if (rxf->rxf_id < 32)
943 bm[0] = 1 << rxf->rxf_id;
944 else
945 bm[1] = 1 << (rxf->rxf_id - 32);
946
947 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
948 ll_req.rxf_id_mask[0] = htonl(bm[0]);
949 ll_req.rxf_id_mask[1] = htonl(bm[1]);
950 ll_req.enable = 0;
951
952 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
953 rxf_cb_disabled, rxf);
954
955 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
956}
957
958static void
959__rxf_config_set(struct bna_rxf *rxf)
960{
961 u32 i;
962 struct bna_rss_mem *rss_mem;
963 struct bna_rx_fndb_ram *rx_fndb_ram;
964 struct bna *bna = rxf->rx->bna;
965 void __iomem *base_addr;
966 unsigned long off;
967
968 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
969 RSS_TABLE_BASE_OFFSET);
970
971 rss_mem = (struct bna_rss_mem *)0;
972
973 /* Configure RSS if required */
974 if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
977 bna->port_num, RSS_TABLE_BASE_OFFSET),
978 bna->regs.page_addr);
979
980 /* temporarily disable RSS, while hash value is written */
981 off = (unsigned long)&rss_mem[0].type_n_hash;
982 writel(0, base_addr + off);
983
984 for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
985 off = (unsigned long)
986 &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
987 writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
988 base_addr + off);
989 }
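		/*
		 * Note the reversed indexing above: key word i is written to
		 * hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i], so the Toeplitz
		 * key ends up laid out back to front in the RSS table entry.
		 */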
990
991 off = (unsigned long)&rss_mem[0].type_n_hash;
992 writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
993 base_addr + off);
994 }
995
996 /* Configure RxF */
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
999 RX_FNDB_RAM_BASE_OFFSET),
1000 bna->regs.page_addr);
1001
1002 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003 RX_FNDB_RAM_BASE_OFFSET);
1004
1005 rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1006
1007 /* We always use RSS table 0 */
1008 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009 writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1010 base_addr + off);
1011
1012 /* small large buffer enable/disable */
1013 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014 writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1015 base_addr + off);
1016
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019 writel((rxf->rit_segment->rit_offset << 16) |
1020 (rxf->forced_offset << 8) |
1021 (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1022 base_addr + off);
1023
1024 /*
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1027 */
1028
1029 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030 writel(((u32)rxf->default_vlan_tag << 16) |
1031 (rxf->ctrl_flags &
1032 (BNA_RXF_CF_DEFAULT_VLAN |
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034 BNA_RXF_CF_VLAN_STRIP)) |
1035 (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036 rxf->hds_cfg.header_size,
1037 base_addr + off);
1038}
1039
1040void
1041__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1042{
1043 struct bna *bna = rxf->rx->bna;
1044 int i;
1045
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047 (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048 bna->regs.page_addr);
1049
1050 if (status == BNA_STATUS_T_ENABLED) {
1051 /* enable VLAN filtering on this function */
1052 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053 writel(rxf->vlan_filter_table[i],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1056 i * 32));
1057 }
1058 } else {
1059 /* disable VLAN filtering on this function */
1060 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1061 writel(0xffffffff,
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1064 i * 32));
1065 }
1066 }
1067}
1068
1069static void
1070__rxf_rit_set(struct bna_rxf *rxf)
1071{
1072 struct bna *bna = rxf->rx->bna;
1073 struct bna_rit_mem *rit_mem;
1074 int i;
1075 void __iomem *base_addr;
1076 unsigned long off;
1077
1078 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079 FUNCTION_TO_RXQ_TRANSLATE);
1080
1081 rit_mem = (struct bna_rit_mem *)0;
1082
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084 FUNCTION_TO_RXQ_TRANSLATE),
1085 bna->regs.page_addr);
1086
1087 for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088 off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089 writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090 rxf->rit_segment->rit[i].small_rxq_id,
1091 base_addr + off);
1092 }
1093}
1094
1095static void
1096__bna_rxf_stat_clr(struct bna_rxf *rxf)
1097{
1098 struct bfi_ll_stats_req ll_req;
1099 u32 bm[2] = {0, 0};
1100
1101 if (rxf->rxf_id < 32)
1102 bm[0] = 1 << rxf->rxf_id;
1103 else
1104 bm[1] = 1 << (rxf->rxf_id - 32);
1105
1106 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107 ll_req.stats_mask = 0;
1108 ll_req.txf_id_mask[0] = 0;
1109 ll_req.txf_id_mask[1] = 0;
1110
1111 ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112 ll_req.rxf_id_mask[1] = htonl(bm[1]);
1113
1114 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115 bna_rxf_cb_stats_cleared, rxf);
1116 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1117}
1118
1119static void
1120rxf_enable(struct bna_rxf *rxf)
1121{
1122 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1124 else {
1125 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1126 __rxf_enable(rxf);
1127 }
1128}
1129
1130static void
1131rxf_cb_enabled(void *arg, int status)
1132{
1133 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1134
1135 bfa_q_qe_init(&rxf->mbox_qe.qe);
1136 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1137}
1138
1139static void
1140rxf_disable(struct bna_rxf *rxf)
1141{
1142 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1144	else {
1145		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
1146		__rxf_disable(rxf);
	}
1147}
1148
1149static void
1150rxf_cb_disabled(void *arg, int status)
1151{
1152 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1153
1154 bfa_q_qe_init(&rxf->mbox_qe.qe);
1155 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1156}
1157
1158void
1159rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1160{
1161 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1162
1163 bfa_q_qe_init(&rxf->mbox_qe.qe);
1164
1165 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1166}
1167
1168static void
1169bna_rxf_cb_stats_cleared(void *arg, int status)
1170{
1171 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1172
1173 bfa_q_qe_init(&rxf->mbox_qe.qe);
1174 bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1175}
1176
1177void
1178rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179 const struct bna_mac *mac_addr)
1180{
1181 struct bfi_ll_mac_addr_req req;
1182
1183 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1184
1185 req.rxf_id = rxf->rxf_id;
1186 memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1187
1188 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189 rxf_cb_cam_fltr_mbox_cmd, rxf);
1190
1191 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1192}
1193
1194static int
1195rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1196{
1197 struct bna_mac *mac = NULL;
1198 struct list_head *qe;
1199
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf->mcast_pending_add_q)) {
1202 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1203 bfa_q_qe_init(qe);
1204 mac = (struct bna_mac *)qe;
1205 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1207 return 1;
1208 }
1209
1210	/* Delete multicast entries previously added */
1211 if (!list_empty(&rxf->mcast_pending_del_q)) {
1212 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1213 bfa_q_qe_init(qe);
1214 mac = (struct bna_mac *)qe;
1215 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1217 return 1;
1218 }
1219
1220 return 0;
1221}
1222
1223static int
1224rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1225{
1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
1230 !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
1231 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1232 }
1233
1234 /* Apply RSS configuration */
1235 if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1236 rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1237 if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1238 /* RSS is being disabled */
1239 rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1240 __rxf_rit_set(rxf);
1241 __rxf_config_set(rxf);
1242 } else {
1243 /* RSS is being enabled or reconfigured */
1244 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1245 __rxf_rit_set(rxf);
1246 __rxf_config_set(rxf);
1247 }
1248 }
1249
1250 return 0;
1251}
1252
1253/**
1254 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
1255 * command. Also processes pending filter configuration - promiscuous mode,
1256 * default mode, allmulti mode and issues mailbox command or directly applies
1257 * to h/w
1258 */
1259static int
1260rxf_process_packet_filter(struct bna_rxf *rxf)
1261{
1262 /* Set the default MAC first */
1263 if (rxf->ucast_pending_set > 0) {
1264 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1265 rxf->ucast_active_mac);
1266 rxf->ucast_pending_set--;
1267 return 1;
1268 }
1269
1270 if (rxf_process_packet_filter_ucast(rxf))
1271 return 1;
1272
1273 if (rxf_process_packet_filter_mcast(rxf))
1274 return 1;
1275
1276 if (rxf_process_packet_filter_promisc(rxf))
1277 return 1;
1278
1279 if (rxf_process_packet_filter_default(rxf))
1280 return 1;
1281
1282 if (rxf_process_packet_filter_allmulti(rxf))
1283 return 1;
1284
1285 if (rxf_process_packet_filter_vlan(rxf))
1286 return 1;
1287
1288 return 0;
1289}
1290
1291static int
1292rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1293{
1294 struct bna_mac *mac = NULL;
1295 struct list_head *qe;
1296
1297 /* 3. delete pending mcast entries */
1298 if (!list_empty(&rxf->mcast_pending_del_q)) {
1299 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1300 bfa_q_qe_init(qe);
1301 mac = (struct bna_mac *)qe;
1302 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1303 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1304 return 1;
1305 }
1306
1307 /* 4. clear active mcast entries; move them to pending_add_q */
1308 if (!list_empty(&rxf->mcast_active_q)) {
1309 bfa_q_deq(&rxf->mcast_active_q, &qe);
1310 bfa_q_qe_init(qe);
1311 mac = (struct bna_mac *)qe;
1312 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1313 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1314 return 1;
1315 }
1316
1317 return 0;
1318}
1319
1320/**
1321 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
1322 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1323 * so that they are added to CAM again in the rxf start path. Moves the current
1324 * filter settings - promiscuous, default, allmulti - to pending filter
1325 * configuration
1326 */
1327static int
1328rxf_clear_packet_filter(struct bna_rxf *rxf)
1329{
1330 if (rxf_clear_packet_filter_ucast(rxf))
1331 return 1;
1332
1333 if (rxf_clear_packet_filter_mcast(rxf))
1334 return 1;
1335
1336 /* 5. clear active default MAC in the CAM */
1337 if (rxf->ucast_pending_set > 0)
1338 rxf->ucast_pending_set = 0;
1339
1340 if (rxf_clear_packet_filter_promisc(rxf))
1341 return 1;
1342
1343 if (rxf_clear_packet_filter_default(rxf))
1344 return 1;
1345
1346 if (rxf_clear_packet_filter_allmulti(rxf))
1347 return 1;
1348
1349 return 0;
1350}
1351
1352static void
1353rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1354{
1355 struct list_head *qe;
1356 struct bna_mac *mac;
1357
1358 /* 3. Move active mcast entries to pending_add_q */
1359 while (!list_empty(&rxf->mcast_active_q)) {
1360 bfa_q_deq(&rxf->mcast_active_q, &qe);
1361 bfa_q_qe_init(qe);
1362 list_add_tail(qe, &rxf->mcast_pending_add_q);
1363 }
1364
1365 /* 4. Throw away delete pending mcast entries */
1366 while (!list_empty(&rxf->mcast_pending_del_q)) {
1367 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1368 bfa_q_qe_init(qe);
1369 mac = (struct bna_mac *)qe;
1370 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1371 }
1372}
1373
1374/**
1375 * In the rxf fail path, throws away the ucast/mcast entries pending for
1376 * deletion, moves all active ucast/mcast entries to pending queue so that
1377 * they are added back to CAM in the rxf start path. Also moves the current
1378 * filter configuration to pending filter configuration.
1379 */
1380static void
1381rxf_reset_packet_filter(struct bna_rxf *rxf)
1382{
1383 rxf_reset_packet_filter_ucast(rxf);
1384
1385 rxf_reset_packet_filter_mcast(rxf);
1386
1387 /* 5. Turn off ucast set flag */
1388 rxf->ucast_pending_set = 0;
1389
1390 rxf_reset_packet_filter_promisc(rxf);
1391
1392 rxf_reset_packet_filter_default(rxf);
1393
1394 rxf_reset_packet_filter_allmulti(rxf);
1395}
1396
1397static void
1398bna_rxf_init(struct bna_rxf *rxf,
1399 struct bna_rx *rx,
1400 struct bna_rx_config *q_config)
1401{
1402 struct list_head *qe;
1403 struct bna_rxp *rxp;
1404
1405 /* rxf_id is initialized during rx_mod init */
1406 rxf->rx = rx;
1407
1408 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1409 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1410 rxf->ucast_pending_set = 0;
1411 INIT_LIST_HEAD(&rxf->ucast_active_q);
1412 rxf->ucast_active_mac = NULL;
1413
1414 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1415 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1416 INIT_LIST_HEAD(&rxf->mcast_active_q);
1417
1418 bfa_q_qe_init(&rxf->mbox_qe.qe);
1419
1420 if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1421 rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1422
1423 rxf->rxf_oper_state = (q_config->paused) ?
1424 BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1425
1426 bna_rxf_adv_init(rxf, rx, q_config);
1427
1428 rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1429 q_config->num_paths);
1430
1431 list_for_each(qe, &rx->rxp_q) {
1432 rxp = (struct bna_rxp *)qe;
1433 if (q_config->rxp_type == BNA_RXP_SINGLE)
1434 rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1435 else
1436 rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1437 break;
1438 }
1439
1440 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1441 memset(rxf->vlan_filter_table, 0,
1442 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1443
1444	/* Set up VLAN 0 for pure priority tagged packets */
1445 rxf->vlan_filter_table[0] |= 1;
1446
1447	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1448}
1449
1450static void
1451bna_rxf_uninit(struct bna_rxf *rxf)
1452{
1453	struct bna *bna = rxf->rx->bna;
1454	struct bna_mac *mac;
1455
1456 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1457 rxf->rit_segment = NULL;
1458
1459 rxf->ucast_pending_set = 0;
1460
1461 while (!list_empty(&rxf->ucast_pending_add_q)) {
1462 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1463 bfa_q_qe_init(&mac->qe);
1464 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1465 }
1466
1467 if (rxf->ucast_active_mac) {
1468 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1469 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1470 rxf->ucast_active_mac);
1471 rxf->ucast_active_mac = NULL;
1472 }
1473
1474 while (!list_empty(&rxf->mcast_pending_add_q)) {
1475 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1476 bfa_q_qe_init(&mac->qe);
1477 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1478 }
1479
1480	/* Turn off pending promisc mode */
1481 if (is_promisc_enable(rxf->rxmode_pending,
1482 rxf->rxmode_pending_bitmask)) {
1483 /* system promisc state should be pending */
1484 BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
1485 promisc_inactive(rxf->rxmode_pending,
1486 rxf->rxmode_pending_bitmask);
1487 bna->rxf_promisc_id = BFI_MAX_RXF;
1488 }
1489 /* Promisc mode should not be active */
1490 BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
1491
1492 /* Turn off pending all-multi mode */
1493 if (is_allmulti_enable(rxf->rxmode_pending,
1494 rxf->rxmode_pending_bitmask)) {
1495 allmulti_inactive(rxf->rxmode_pending,
1496 rxf->rxmode_pending_bitmask);
1497 }
1498 /* Allmulti mode should not be active */
1499 BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
1500
1501	rxf->rx = NULL;
1502}
1503
1504static void
1505bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
1506{
1507 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
1508 if (rx->rxf.rxf_id < 32)
1509 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
1510 else
1511 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1512 1 << (rx->rxf.rxf_id - 32));
1513}
1514
1515static void
1516bna_rxf_start(struct bna_rxf *rxf)
1517{
1518 rxf->start_cbfn = bna_rx_cb_rxf_started;
1519 rxf->start_cbarg = rxf->rx;
1520 rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1521 bfa_fsm_send_event(rxf, RXF_E_START);
1522}
1523
1524static void
1525bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
1526{
1527 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
1528 if (rx->rxf.rxf_id < 32)
1529 rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
1530 else
1531 rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
1532 1 << (rx->rxf.rxf_id - 32);
1533}
1534
1535static void
1536bna_rxf_stop(struct bna_rxf *rxf)
1537{
1538 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1539 rxf->stop_cbarg = rxf->rx;
1540 bfa_fsm_send_event(rxf, RXF_E_STOP);
1541}
1542
1543static void
1544bna_rxf_fail(struct bna_rxf *rxf)
1545{
1546 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1547 bfa_fsm_send_event(rxf, RXF_E_FAIL);
1548}
1549
1550int
1551bna_rxf_state_get(struct bna_rxf *rxf)
1552{
1553 return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1554}
1555
1556enum bna_cb_status
1557bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1558 void (*cbfn)(struct bnad *, struct bna_rx *,
1559 enum bna_cb_status))
1560{
1561 struct bna_rxf *rxf = &rx->rxf;
1562
1563 if (rxf->ucast_active_mac == NULL) {
1564 rxf->ucast_active_mac =
1565 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1566 if (rxf->ucast_active_mac == NULL)
1567 return BNA_CB_UCAST_CAM_FULL;
1568 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1569 }
1570
1571 memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1572 rxf->ucast_pending_set++;
1573 rxf->cam_fltr_cbfn = cbfn;
1574 rxf->cam_fltr_cbarg = rx->bna->bnad;
1575
1576 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1577
1578 return BNA_CB_SUCCESS;
1579}
1580
1581enum bna_cb_status
1582bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1583 void (*cbfn)(struct bnad *, struct bna_rx *,
1584 enum bna_cb_status))
1585{
1586 struct bna_rxf *rxf = &rx->rxf;
1587 struct list_head *qe;
1588 struct bna_mac *mac;
1589
1590 /* Check if already added */
1591 list_for_each(qe, &rxf->mcast_active_q) {
1592 mac = (struct bna_mac *)qe;
1593 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1594 if (cbfn)
1595 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1596 return BNA_CB_SUCCESS;
1597 }
1598 }
1599
1600 /* Check if pending addition */
1601 list_for_each(qe, &rxf->mcast_pending_add_q) {
1602 mac = (struct bna_mac *)qe;
1603 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1604 if (cbfn)
1605 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1606 return BNA_CB_SUCCESS;
1607 }
1608 }
1609
1610 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1611 if (mac == NULL)
1612 return BNA_CB_MCAST_LIST_FULL;
1613 bfa_q_qe_init(&mac->qe);
1614 memcpy(mac->addr, addr, ETH_ALEN);
1615 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1616
1617 rxf->cam_fltr_cbfn = cbfn;
1618 rxf->cam_fltr_cbarg = rx->bna->bnad;
1619
1620 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1621
1622 return BNA_CB_SUCCESS;
1623}
1624
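/**
 * Reconciles the currently configured multicast list with @mclist: addresses
 * in @mclist that are neither active nor pending are scheduled for addition,
 * active addresses missing from @mclist are scheduled for deletion, and a
 * CAM update is triggered only if something actually changed.
 */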
1625enum bna_cb_status
1626bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1627 void (*cbfn)(struct bnad *, struct bna_rx *,
1628 enum bna_cb_status))
1629{
1630 struct bna_rxf *rxf = &rx->rxf;
1631 struct list_head list_head;
1632 struct list_head *qe;
1633 u8 *mcaddr;
1634 struct bna_mac *mac;
1635 struct bna_mac *mac1;
1636 int skip;
1637 int delete;
1638 int need_hw_config = 0;
1639 int i;
1640
1641 /* Allocate nodes */
1642 INIT_LIST_HEAD(&list_head);
1643 for (i = 0, mcaddr = mclist; i < count; i++) {
1644 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1645 if (mac == NULL)
1646 goto err_return;
1647 bfa_q_qe_init(&mac->qe);
1648 memcpy(mac->addr, mcaddr, ETH_ALEN);
1649 list_add_tail(&mac->qe, &list_head);
1650
1651 mcaddr += ETH_ALEN;
1652 }
1653
1654 /* Schedule for addition */
1655 while (!list_empty(&list_head)) {
1656 bfa_q_deq(&list_head, &qe);
1657 mac = (struct bna_mac *)qe;
1658 bfa_q_qe_init(&mac->qe);
1659
1660 skip = 0;
1661
1662 /* Skip if already added */
1663 list_for_each(qe, &rxf->mcast_active_q) {
1664 mac1 = (struct bna_mac *)qe;
1665 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1666 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1667 mac);
1668 skip = 1;
1669 break;
1670 }
1671 }
1672
1673 if (skip)
1674 continue;
1675
1676 /* Skip if pending addition */
1677 list_for_each(qe, &rxf->mcast_pending_add_q) {
1678 mac1 = (struct bna_mac *)qe;
1679 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1680 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1681 mac);
1682 skip = 1;
1683 break;
1684 }
1685 }
1686
1687 if (skip)
1688 continue;
1689
1690 need_hw_config = 1;
1691 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1692 }
1693
1694 /**
1695 * Delete the entries that are in the pending_add_q but not
1696 * in the new list
1697 */
1698 while (!list_empty(&rxf->mcast_pending_add_q)) {
1699 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1700 mac = (struct bna_mac *)qe;
1701 bfa_q_qe_init(&mac->qe);
1702 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1703 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1704 delete = 0;
1705 break;
1706 }
1707 mcaddr += ETH_ALEN;
1708 }
1709 if (delete)
1710 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1711 else
1712 list_add_tail(&mac->qe, &list_head);
1713 }
1714 while (!list_empty(&list_head)) {
1715 bfa_q_deq(&list_head, &qe);
1716 mac = (struct bna_mac *)qe;
1717 bfa_q_qe_init(&mac->qe);
1718 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1719 }
1720
1721 /**
1722 * Schedule entries for deletion that are in the active_q but not
1723 * in the new list
1724 */
1725 while (!list_empty(&rxf->mcast_active_q)) {
1726 bfa_q_deq(&rxf->mcast_active_q, &qe);
1727 mac = (struct bna_mac *)qe;
1728 bfa_q_qe_init(&mac->qe);
1729 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1730 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1731 delete = 0;
1732 break;
1733 }
1734 mcaddr += ETH_ALEN;
1735 }
1736 if (delete) {
1737 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1738 need_hw_config = 1;
1739 } else {
1740 list_add_tail(&mac->qe, &list_head);
1741 }
1742 }
1743 while (!list_empty(&list_head)) {
1744 bfa_q_deq(&list_head, &qe);
1745 mac = (struct bna_mac *)qe;
1746 bfa_q_qe_init(&mac->qe);
1747 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1748 }
1749
1750 if (need_hw_config) {
1751 rxf->cam_fltr_cbfn = cbfn;
1752 rxf->cam_fltr_cbarg = rx->bna->bnad;
1753 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1754 } else if (cbfn)
1755 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1756
1757 return BNA_CB_SUCCESS;
1758
1759err_return:
1760 while (!list_empty(&list_head)) {
1761 bfa_q_deq(&list_head, &qe);
1762 mac = (struct bna_mac *)qe;
1763 bfa_q_qe_init(&mac->qe);
1764 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1765 }
1766
1767 return BNA_CB_MCAST_LIST_FULL;
1768}
1769
1770void
1771bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1772{
1773 struct bna_rxf *rxf = &rx->rxf;
1774 int index = (vlan_id >> 5);
1775 int bit = (1 << (vlan_id & 0x1F));
1776
1777 rxf->vlan_filter_table[index] |= bit;
1778 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1779 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1780 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1781 }
1782}
1783
1784void
1785bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1786{
1787 struct bna_rxf *rxf = &rx->rxf;
1788 int index = (vlan_id >> 5);
1789 int bit = (1 << (vlan_id & 0x1F));
1790
1791 rxf->vlan_filter_table[index] &= ~bit;
1792 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1793 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1794 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1795 }
1796}
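/*
 * Sketch: vlan_filter_table[] is a flat bitmap of VLAN ids kept in 32-bit
 * words -- (vlan_id >> 5) picks the word and (vlan_id & 0x1f) the bit, as a
 * standalone equivalent (a 4096-id space assumed) makes explicit:
 */
#if 0	/* example only -- never built */
static u32 demo_vlan_tbl[4096 / 32];

static void demo_vlan_set(int vlan_id)
{
	demo_vlan_tbl[vlan_id >> 5] |= 1u << (vlan_id & 0x1f);
}

static void demo_vlan_clear(int vlan_id)
{
	demo_vlan_tbl[vlan_id >> 5] &= ~(1u << (vlan_id & 0x1f));
}
#endif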
1797
1798/**
1799 * RX
1800 */
1801#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
1802 struct bna_doorbell_qset *_qset; \
1803 unsigned long off; \
1804 (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
1805 (q)->rcb->q_depth = (qdepth); \
1806 (q)->rcb->unmap_q = unmapq_mem; \
1807 (q)->rcb->rxq = (q); \
1808 (q)->rcb->cq = &(rxp)->cq; \
1809 (q)->rcb->bnad = (bna)->bnad; \
1810 _qset = (struct bna_doorbell_qset *)0; \
1811 off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
1812 (q)->rcb->q_dbell = off + \
1813 BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
1814 (q)->rcb->id = _id; \
1815} while (0)
1816
1817#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1818 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1819
1820#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1821 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
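/*
 * SIZE_TO_PAGES() is a round-up division by PAGE_SIZE; e.g. with 4 KiB
 * pages: SIZE_TO_PAGES(1) == 1, SIZE_TO_PAGES(4096) == 1,
 * SIZE_TO_PAGES(4097) == 2.
 */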
1822
1823#define call_rx_stop_callback(rx, status) \
1824 if ((rx)->stop_cbfn) { \
1825 (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
1826 (rx)->stop_cbfn = NULL; \
1827 (rx)->stop_cbarg = NULL; \
1828 }
1829
1830/*
1831 * Since rx_enable is a synchronous callback, there is no start_cbfn required.
1832 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
1833 * for each rxpath.
1834 */
1835
1836#define call_rx_disable_cbfn(rx, status) \
1837 if ((rx)->disable_cbfn) { \
1838 (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
1839 status); \
1840 (rx)->disable_cbfn = NULL; \
1841 (rx)->disable_cbarg = NULL; \
1842 } \
1843
1844#define rxqs_reqd(type, num_rxqs) \
1845 (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1846
1847#define rx_ib_fail(rx) \
1848do { \
1849 struct bna_rxp *rxp; \
1850 struct list_head *qe; \
1851 list_for_each(qe, &(rx)->rxp_q) { \
1852 rxp = (struct bna_rxp *)qe; \
1853 bna_ib_fail(rxp->cq.ib); \
1854 } \
1855} while (0)
1856
1857static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1858static void __bna_rxq_start(struct bna_rxq *rxq);
1859static void __bna_cq_start(struct bna_cq *cq);
1860static void bna_rit_create(struct bna_rx *rx);
1861static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1862static void bna_rx_cb_rxq_stopped_all(void *arg);
1863
1864bfa_fsm_state_decl(bna_rx, stopped,
1865 struct bna_rx, enum bna_rx_event);
1866bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1867 struct bna_rx, enum bna_rx_event);
1868bfa_fsm_state_decl(bna_rx, started,
1869 struct bna_rx, enum bna_rx_event);
1870bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1871 struct bna_rx, enum bna_rx_event);
1872bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1873 struct bna_rx, enum bna_rx_event);
1874
1875static const struct bfa_sm_table rx_sm_table[] = {
1876	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1877 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1878 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1879 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1880 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1881};
1882
1883static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1884{
1885 struct bna_rxp *rxp;
1886 struct list_head *qe_rxp;
1887
1888 list_for_each(qe_rxp, &rx->rxp_q) {
1889 rxp = (struct bna_rxp *)qe_rxp;
1890 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1891 }
1892
1893 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1894}
1895
1896static void bna_rx_sm_stopped(struct bna_rx *rx,
1897 enum bna_rx_event event)
1898{
1899 switch (event) {
1900 case RX_E_START:
1901 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1902 break;
1903 case RX_E_STOP:
1904 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1905 break;
1906 case RX_E_FAIL:
1907 /* no-op */
1908 break;
1909 default:
1910 bfa_sm_fault(rx->bna, event);
1911 break;
1912 }
1913
1914}
1915
1916static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1917{
1918 struct bna_rxp *rxp;
1919 struct list_head *qe_rxp;
1920 struct bna_rxq *q0 = NULL, *q1 = NULL;
1921
1922 /* Setup the RIT */
1923 bna_rit_create(rx);
1924
1925 list_for_each(qe_rxp, &rx->rxp_q) {
1926 rxp = (struct bna_rxp *)qe_rxp;
1927 bna_ib_start(rxp->cq.ib);
1928 GET_RXQS(rxp, q0, q1);
1929 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1930 __bna_rxq_start(q0);
1931 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1932 if (q1) {
1933 __bna_rxq_start(q1);
1934 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1935 }
1936 __bna_cq_start(&rxp->cq);
1937 }
1938
1939 bna_rxf_start(&rx->rxf);
1940}
1941
1942static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1943 enum bna_rx_event event)
1944{
1945 switch (event) {
1946 case RX_E_STOP:
1947 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1948 break;
1949 case RX_E_FAIL:
1950 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1951 rx_ib_fail(rx);
1952 bna_rxf_fail(&rx->rxf);
1953 break;
1954 case RX_E_RXF_STARTED:
1955 bfa_fsm_set_state(rx, bna_rx_sm_started);
1956 break;
1957 default:
1958 bfa_sm_fault(rx->bna, event);
1959 break;
1960 }
1961}
1962
1963void
1964bna_rx_sm_started_entry(struct bna_rx *rx)
1965{
1966 struct bna_rxp *rxp;
1967 struct list_head *qe_rxp;
1968
1969 /* Start IB */
1970 list_for_each(qe_rxp, &rx->rxp_q) {
1971 rxp = (struct bna_rxp *)qe_rxp;
1972 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1973 }
1974
1975	bna_llport_rx_started(&rx->bna->port.llport);
1976}
1977
1978void
1979bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1980{
1981 switch (event) {
1982 case RX_E_FAIL:
1983	bna_llport_rx_stopped(&rx->bna->port.llport);
1984	bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1985 rx_ib_fail(rx);
1986 bna_rxf_fail(&rx->rxf);
1987 break;
1988 case RX_E_STOP:
1989	bna_llport_rx_stopped(&rx->bna->port.llport);
1990	bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1991 break;
1992 default:
1993 bfa_sm_fault(rx->bna, event);
1994 break;
1995 }
1996}
1997
1998void
1999bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
2000{
2001 bna_rxf_stop(&rx->rxf);
2002}
2003
2004void
2005bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2006{
2007 switch (event) {
2008 case RX_E_RXF_STOPPED:
2009 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2010 break;
2011 case RX_E_RXF_STARTED:
2012 /**
2013 * RxF was in the process of starting up when
2014 * RXF_E_STOP was issued. Ignore this event
2015 */
2016 break;
2017 case RX_E_FAIL:
2018 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2019 rx_ib_fail(rx);
2020 bna_rxf_fail(&rx->rxf);
2021 break;
2022 default:
2023 bfa_sm_fault(rx->bna, event);
2024 break;
2025 }
2026
2027}
2028
2029void
2030bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2031{
2032 struct bna_rxp *rxp = NULL;
2033 struct bna_rxq *q0 = NULL;
2034 struct bna_rxq *q1 = NULL;
2035 struct list_head *qe;
2036 u32 rxq_mask[2] = {0, 0};
2037
2038 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2039 bfa_wc_up(&rx->rxq_stop_wc);
2040 list_for_each(qe, &rx->rxp_q) {
2041 rxp = (struct bna_rxp *)qe;
2042 GET_RXQS(rxp, q0, q1);
2043 if (q0->rxq_id < 32)
2044 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2045 else
2046 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2047 if (q1) {
2048 if (q1->rxq_id < 32)
2049 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2050 else
2051 rxq_mask[1] |= ((u32)
2052 1 << (q1->rxq_id - 32));
2053 }
2054 }
2055
2056 __bna_multi_rxq_stop(rxp, rxq_mask);
2057}
2058
2059void
2060bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2061{
2062 struct bna_rxp *rxp = NULL;
2063 struct list_head *qe;
2064
2065 switch (event) {
2066 case RX_E_RXQ_STOPPED:
2067 list_for_each(qe, &rx->rxp_q) {
2068 rxp = (struct bna_rxp *)qe;
2069 bna_ib_stop(rxp->cq.ib);
2070 }
2071 /* Fall through */
2072 case RX_E_FAIL:
2073 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2074 break;
2075 default:
2076 bfa_sm_fault(rx->bna, event);
2077 break;
2078 }
2079}
2080
2081void
2082__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2083{
2084 struct bfi_ll_q_stop_req ll_req;
2085
2086 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2087 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2088 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2089 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2090 bna_rx_cb_multi_rxq_stopped, rxp);
2091 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2092}
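
/*
 * Illustrative sketch, not part of the driver: how a queue id is folded into
 * the two 32-bit words of the stop mask built in bna_rx_sm_rxq_stop_wait_entry()
 * and consumed by __bna_multi_rxq_stop() above.  example_set_q_bit() is a
 * hypothetical helper; the driver open-codes this logic in both places.
 */
static inline void
example_set_q_bit(unsigned int q_id, unsigned int mask[2])
{
	/* ids 0..31 land in word 0, ids 32..63 in word 1 */
	if (q_id < 32)
		mask[0] |= 1U << q_id;
	else
		mask[1] |= 1U << (q_id - 32);
}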
2093
2094void
2095__bna_rxq_start(struct bna_rxq *rxq)
2096{
2097 struct bna_rxtx_q_mem *q_mem;
2098 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2099 struct bna_dma_addr cur_q_addr;
2100 /* struct bna_doorbell_qset *qset; */
2101 struct bna_qpt *qpt;
2102 u32 pg_num;
2103 struct bna *bna = rxq->rx->bna;
2104 void __iomem *base_addr;
2105 unsigned long off;
2106
2107 qpt = &rxq->qpt;
2108 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2109
2110 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2111 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2112 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2113 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2114
2115 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2116 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2117 (qpt->page_size >> 2);
2118 rxq_cfg.sg_n_cq_n_cns_ptr =
2119 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2120 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2121 BNA_Q_IDLE_STATE;
2122 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
2123
2124 /* Write the page number register */
2125 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2126 HQM_RXTX_Q_RAM_BASE_OFFSET);
2127 writel(pg_num, bna->regs.page_addr);
2128
2129 /* Write to h/w */
2130 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2131 HQM_RXTX_Q_RAM_BASE_OFFSET);
2132
2133 q_mem = (struct bna_rxtx_q_mem *)0;
2134 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2135
2136 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2137 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2138
2139 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2140 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2141
2142 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2143 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2144
2145 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2146 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2147
2148 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2149 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2150
2151 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2152 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2153
2154 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2155 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2156
2157 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2158 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2159
2160 off = (unsigned long)&rxq_mem->next_qid;
2161 writel(rxq_cfg.next_qid, base_addr + off);
2162
2163 rxq->rcb->producer_index = 0;
2164 rxq->rcb->consumer_index = 0;
2165}
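
/*
 * Illustrative sketch, not part of the driver: __bna_rxq_start() above (and
 * __bna_cq_start() below) derive register offsets by casting 0 to a struct
 * pointer and taking the address of a member, i.e. a hand-rolled offsetof().
 * The example below shows the same trick on a hypothetical stand-in struct;
 * offsetof() from <linux/stddef.h> would be the portable equivalent.
 */
struct example_q_regs {
	unsigned int ctrl;
	unsigned int status;
};

static inline unsigned long
example_status_offset(void)
{
	struct example_q_regs *base = (struct example_q_regs *)0;

	/* byte offset of ->status within the register block (4 here) */
	return (unsigned long)&base->status;
}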
2166
2167void
2168__bna_cq_start(struct bna_cq *cq)
2169{
2170 struct bna_cq_mem cq_cfg, *cq_mem;
2171 const struct bna_qpt *qpt;
2172 struct bna_dma_addr cur_q_addr;
2173 u32 pg_num;
2174 struct bna *bna = cq->rx->bna;
2175 void __iomem *base_addr;
2176 unsigned long off;
2177
2178 qpt = &cq->qpt;
2179 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2180
2181 /*
2182 * Fill out structure, to be subsequently written
2183 * to hardware
2184 */
2185 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2186 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2187 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2188 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2189
2190 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2191 cq_cfg.entry_n_pg_size =
2192 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2193 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2194 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2195 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2196
2197 /* Write the page number register */
2198 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2199 HQM_CQ_RAM_BASE_OFFSET);
2200
2201 writel(pg_num, bna->regs.page_addr);
2202
2203 /* H/W write */
2204 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2205 HQM_CQ_RAM_BASE_OFFSET);
2206
2207 cq_mem = (struct bna_cq_mem *)0;
2208
2209 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2210 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2211
2212 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2213 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2214
2215 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2216 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2217
2218 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2219 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2220
2221 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2222 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2223
2224 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2225 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2226
2227 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2228 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2229
2230 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2231 writel(cq_cfg.q_state, base_addr + off);
2232
2233 cq->ccb->producer_index = 0;
2234 *(cq->ccb->hw_producer_index) = 0;
2235}
2236
2237void
2238bna_rit_create(struct bna_rx *rx)
2239{
2240 struct list_head *qe_rxp;
2241 struct bna *bna;
2242 struct bna_rxp *rxp;
2243 struct bna_rxq *q0 = NULL;
2244 struct bna_rxq *q1 = NULL;
2245 int offset;
2246
2247 bna = rx->bna;
2248
2249 offset = 0;
2250 list_for_each(qe_rxp, &rx->rxp_q) {
2251 rxp = (struct bna_rxp *)qe_rxp;
2252 GET_RXQS(rxp, q0, q1);
2253 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2254 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2255 (q1 ? q1->rxq_id : 0);
2256 offset++;
2257 }
2258}
2259
2260static int
2261_rx_can_satisfy(struct bna_rx_mod *rx_mod,
2262 struct bna_rx_config *rx_cfg)
2263{
2264 if ((rx_mod->rx_free_count == 0) ||
2265 (rx_mod->rxp_free_count == 0) ||
2266 (rx_mod->rxq_free_count == 0))
2267 return 0;
2268
2269 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2270 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2271 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2272 return 0;
2273 } else {
2274 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2275 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2276 return 0;
2277 }
2278
2279 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2280 return 0;
2281
2282 return 1;
2283}
2284
2285static struct bna_rxq *
2286_get_free_rxq(struct bna_rx_mod *rx_mod)
2287{
2288 struct bna_rxq *rxq = NULL;
2289 struct list_head *qe = NULL;
2290
2291 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2292 if (qe) {
2293 rx_mod->rxq_free_count--;
2294 rxq = (struct bna_rxq *)qe;
2295 }
2296 return rxq;
2297}
2298
2299static void
2300_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2301{
2302 bfa_q_qe_init(&rxq->qe);
2303 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2304 rx_mod->rxq_free_count++;
2305}
2306
2307static struct bna_rxp *
2308_get_free_rxp(struct bna_rx_mod *rx_mod)
2309{
2310 struct list_head *qe = NULL;
2311 struct bna_rxp *rxp = NULL;
2312
2313 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2314 if (qe) {
2315 rx_mod->rxp_free_count--;
2316
2317 rxp = (struct bna_rxp *)qe;
2318 }
2319
2320 return rxp;
2321}
2322
2323static void
2324_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2325{
2326 bfa_q_qe_init(&rxp->qe);
2327 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2328 rx_mod->rxp_free_count++;
2329}
2330
2331static struct bna_rx *
2332_get_free_rx(struct bna_rx_mod *rx_mod)
2333{
2334 struct list_head *qe = NULL;
2335 struct bna_rx *rx = NULL;
2336
2337 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2338 if (qe) {
2339 rx_mod->rx_free_count--;
2340
2341 rx = (struct bna_rx *)qe;
2342 bfa_q_qe_init(qe);
2343 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2344 }
2345
2346 return rx;
2347}
2348
2349static void
2350_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2351{
2352 bfa_q_qe_init(&rx->qe);
2353 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2354 rx_mod->rx_free_count++;
2355}
2356
2357static void
2358_rx_init(struct bna_rx *rx, struct bna *bna)
2359{
2360 rx->bna = bna;
2361 rx->rx_flags = 0;
2362
2363 INIT_LIST_HEAD(&rx->rxp_q);
2364
2365 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2366 rx->rxq_stop_wc.wc_cbarg = rx;
2367 rx->rxq_stop_wc.wc_count = 0;
2368
2369 rx->stop_cbfn = NULL;
2370 rx->stop_cbarg = NULL;
2371}
2372
2373static void
2374_rxp_add_rxqs(struct bna_rxp *rxp,
2375 struct bna_rxq *q0,
2376 struct bna_rxq *q1)
2377{
2378 switch (rxp->type) {
2379 case BNA_RXP_SINGLE:
2380 rxp->rxq.single.only = q0;
2381 rxp->rxq.single.reserved = NULL;
2382 break;
2383 case BNA_RXP_SLR:
2384 rxp->rxq.slr.large = q0;
2385 rxp->rxq.slr.small = q1;
2386 break;
2387 case BNA_RXP_HDS:
2388 rxp->rxq.hds.data = q0;
2389 rxp->rxq.hds.hdr = q1;
2390 break;
2391 default:
2392 break;
2393 }
2394}
2395
2396static void
2397_rxq_qpt_init(struct bna_rxq *rxq,
2398 struct bna_rxp *rxp,
2399 u32 page_count,
2400 u32 page_size,
2401 struct bna_mem_descr *qpt_mem,
2402 struct bna_mem_descr *swqpt_mem,
2403 struct bna_mem_descr *page_mem)
2404{
2405 int i;
2406
2407 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2408 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2409 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2410 rxq->qpt.page_count = page_count;
2411 rxq->qpt.page_size = page_size;
2412
2413 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2414
2415 for (i = 0; i < rxq->qpt.page_count; i++) {
2416 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2417 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2418 page_mem[i].dma.lsb;
2419 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2420 page_mem[i].dma.msb;
2421
2422 }
2423}
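
/*
 * Illustrative sketch, not part of the driver: _rxq_qpt_init() above (and
 * _rxp_cqpt_setup() below) maintain two parallel page tables - sw_qpt[] keeps
 * kernel virtual addresses for the driver, while the QPT handed to hardware
 * holds the matching DMA addresses.  All example_* names are hypothetical
 * stand-ins.
 */
struct example_dma_addr {
	unsigned int msb;
	unsigned int lsb;
};

static void
example_qpt_fill(void **sw_qpt, struct example_dma_addr *hw_qpt,
		 void * const *page_kva, const struct example_dma_addr *page_dma,
		 int page_count)
{
	int i;

	for (i = 0; i < page_count; i++) {
		sw_qpt[i] = page_kva[i];	/* driver-side page lookup */
		hw_qpt[i] = page_dma[i];	/* device-visible page table */
	}
}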
2424
2425static void
2426_rxp_cqpt_setup(struct bna_rxp *rxp,
2427 u32 page_count,
2428 u32 page_size,
2429 struct bna_mem_descr *qpt_mem,
2430 struct bna_mem_descr *swqpt_mem,
2431 struct bna_mem_descr *page_mem)
2432{
2433 int i;
2434
2435 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2436 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2437 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2438 rxp->cq.qpt.page_count = page_count;
2439 rxp->cq.qpt.page_size = page_size;
2440
2441 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2442
2443 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2444 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2445
2446 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2447 page_mem[i].dma.lsb;
2448 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2449 page_mem[i].dma.msb;
2450
2451 }
2452}
2453
2454static void
2455_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2456{
2457 list_add_tail(&rxp->qe, &rx->rxp_q);
2458}
2459
2460static void
2461_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2462{
2463 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2464 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2465 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2466 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2467
2468 rx_mod->rx_free_count = 0;
2469 rx_mod->rxq_free_count = 0;
2470 rx_mod->rxp_free_count = 0;
2471}
2472
2473static void
2474_rx_ctor(struct bna_rx *rx, int id)
2475{
2476 bfa_q_qe_init(&rx->qe);
2477 INIT_LIST_HEAD(&rx->rxp_q);
2478 rx->bna = NULL;
2479
2480 rx->rxf.rxf_id = id;
2481
2482 /* FIXME: mbox_qe ctor()?? */
2483 bfa_q_qe_init(&rx->mbox_qe.qe);
2484
2485 rx->stop_cbfn = NULL;
2486 rx->stop_cbarg = NULL;
2487}
2488
2489void
2490bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2491{
2492 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2493
2494 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2495}
2496
2497void
2498bna_rx_cb_rxq_stopped_all(void *arg)
2499{
2500 struct bna_rx *rx = (struct bna_rx *)arg;
2501
2502 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2503}
2504
2505static void
2506bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2507 enum bna_cb_status status)
2508{
2509 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2510
2511 bfa_wc_down(&rx_mod->rx_stop_wc);
2512}
2513
2514static void
2515bna_rx_mod_cb_rx_stopped_all(void *arg)
2516{
2517 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2518
2519 if (rx_mod->stop_cbfn)
2520 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2521 rx_mod->stop_cbfn = NULL;
2522}
2523
2524static void
2525bna_rx_start(struct bna_rx *rx)
2526{
2527 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2528 if (rx->rx_flags & BNA_RX_F_ENABLE)
2529 bfa_fsm_send_event(rx, RX_E_START);
2530}
2531
2532static void
2533bna_rx_stop(struct bna_rx *rx)
2534{
2535 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2536 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2537 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2538 else {
2539 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2540 rx->stop_cbarg = &rx->bna->rx_mod;
2541 bfa_fsm_send_event(rx, RX_E_STOP);
2542 }
2543}
2544
2545static void
2546bna_rx_fail(struct bna_rx *rx)
2547{
2548 /* Indicate port is not enabled, and failed */
2549 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2550 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2551 bfa_fsm_send_event(rx, RX_E_FAIL);
2552}
2553
2554void
2555bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2556{
2557 struct bna_rx *rx;
2558 struct list_head *qe;
2559
2560 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2561 if (type == BNA_RX_T_LOOPBACK)
2562 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2563
2564 list_for_each(qe, &rx_mod->rx_active_q) {
2565 rx = (struct bna_rx *)qe;
2566 if (rx->type == type)
2567 bna_rx_start(rx);
2568 }
2569}
2570
2571void
2572bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2573{
2574 struct bna_rx *rx;
2575 struct list_head *qe;
2576
2577 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2578 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2579
2580 rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2581
2582 /**
2583 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2584 * as we are going to call bna_rx_stop
2585 */
2586 list_for_each(qe, &rx_mod->rx_active_q) {
2587 rx = (struct bna_rx *)qe;
2588 if (rx->type == type)
2589 bfa_wc_up(&rx_mod->rx_stop_wc);
2590 }
2591
2592 if (rx_mod->rx_stop_wc.wc_count == 0) {
2593 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2594 rx_mod->stop_cbfn = NULL;
2595 return;
2596 }
2597
2598 list_for_each(qe, &rx_mod->rx_active_q) {
2599 rx = (struct bna_rx *)qe;
2600 if (rx->type == type)
2601 bna_rx_stop(rx);
2602 }
2603}
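
/*
 * Illustrative sketch, not part of the driver: bna_rx_mod_stop() above uses the
 * bfa_wc_* wait counter as a completion barrier - it is raised once per
 * bna_rx_stop() call and lowered by each rx-stopped callback; the resume hook
 * is assumed to fire when the count drops back to zero.  A hypothetical
 * stand-alone form of that pattern:
 */
struct example_wc {
	int count;
	void (*resume)(void *arg);
	void *arg;
};

static void
example_wc_up(struct example_wc *wc)
{
	wc->count++;		/* one more pending stop */
}

static void
example_wc_down(struct example_wc *wc)
{
	if (--wc->count == 0 && wc->resume)
		wc->resume(wc->arg);	/* last outstanding stop completed */
}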
2604
2605void
2606bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2607{
2608 struct bna_rx *rx;
2609 struct list_head *qe;
2610
2611 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2612 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2613
2614 list_for_each(qe, &rx_mod->rx_active_q) {
2615 rx = (struct bna_rx *)qe;
2616 bna_rx_fail(rx);
2617 }
2618}
2619
2620void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2621 struct bna_res_info *res_info)
2622{
2623 int index;
2624 struct bna_rx *rx_ptr;
2625 struct bna_rxp *rxp_ptr;
2626 struct bna_rxq *rxq_ptr;
2627
2628 rx_mod->bna = bna;
2629 rx_mod->flags = 0;
2630
2631 rx_mod->rx = (struct bna_rx *)
2632 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2633 rx_mod->rxp = (struct bna_rxp *)
2634 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2635 rx_mod->rxq = (struct bna_rxq *)
2636 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2637
2638 /* Initialize the queues */
2639 _init_rxmod_queues(rx_mod);
2640
2641 /* Build RX queues */
2642 for (index = 0; index < BFI_MAX_RXQ; index++) {
2643 rx_ptr = &rx_mod->rx[index];
2644 _rx_ctor(rx_ptr, index);
2645 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2646 rx_mod->rx_free_count++;
2647 }
2648
2649 /* build RX-path queue */
2650 for (index = 0; index < BFI_MAX_RXQ; index++) {
2651 rxp_ptr = &rx_mod->rxp[index];
2652 rxp_ptr->cq.cq_id = index;
2653 bfa_q_qe_init(&rxp_ptr->qe);
2654 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2655 rx_mod->rxp_free_count++;
2656 }
2657
2658 /* build RXQ queue */
2659 for (index = 0; index < BFI_MAX_RXQ; index++) {
2660 rxq_ptr = &rx_mod->rxq[index];
2661 rxq_ptr->rxq_id = index;
2662
2663 bfa_q_qe_init(&rxq_ptr->qe);
2664 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2665 rx_mod->rxq_free_count++;
2666 }
2667
2668 rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2669 rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2670 rx_mod->rx_stop_wc.wc_count = 0;
2671}
2672
2673void
2674bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2675{
2676 struct list_head *qe;
2677 int i;
2678
2679 i = 0;
2680 list_for_each(qe, &rx_mod->rx_free_q)
2681 i++;
2682
2683 i = 0;
2684 list_for_each(qe, &rx_mod->rxp_free_q)
2685 i++;
2686
2687 i = 0;
2688 list_for_each(qe, &rx_mod->rxq_free_q)
2689 i++;
2690
2691 rx_mod->bna = NULL;
2692}
2693
2694int
2695bna_rx_state_get(struct bna_rx *rx)
2696{
2697 return bfa_sm_to_state(rx_sm_table, rx->fsm);
2698}
2699
2700void
2701bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2702{
2703 u32 cq_size, hq_size, dq_size;
2704 u32 cpage_count, hpage_count, dpage_count;
2705 struct bna_mem_info *mem_info;
2706 u32 cq_depth;
2707 u32 hq_depth;
2708 u32 dq_depth;
2709
2710 dq_depth = q_cfg->q_depth;
2711 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2712 cq_depth = dq_depth + hq_depth;
2713
2714 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2715 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2716 cq_size = ALIGN(cq_size, PAGE_SIZE);
2717 cpage_count = SIZE_TO_PAGES(cq_size);
2718
2719 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2720 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2721 dq_size = ALIGN(dq_size, PAGE_SIZE);
2722 dpage_count = SIZE_TO_PAGES(dq_size);
2723
2724 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2725 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2726 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2727 hq_size = ALIGN(hq_size, PAGE_SIZE);
2728 hpage_count = SIZE_TO_PAGES(hq_size);
2729 } else {
2730 hpage_count = 0;
2731 }
2732
2733 /* CCB structures */
2734 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2735 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2736 mem_info->mem_type = BNA_MEM_T_KVA;
2737 mem_info->len = sizeof(struct bna_ccb);
2738 mem_info->num = q_cfg->num_paths;
2739
2740 /* RCB structures */
2741 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2742 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2743 mem_info->mem_type = BNA_MEM_T_KVA;
2744 mem_info->len = sizeof(struct bna_rcb);
2745 mem_info->num = BNA_GET_RXQS(q_cfg);
2746
2747 /* Completion QPT */
2748 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2749 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2750 mem_info->mem_type = BNA_MEM_T_DMA;
2751 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2752 mem_info->num = q_cfg->num_paths;
2753
2754 /* Completion s/w QPT */
2755 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2756 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2757 mem_info->mem_type = BNA_MEM_T_KVA;
2758 mem_info->len = cpage_count * sizeof(void *);
2759 mem_info->num = q_cfg->num_paths;
2760
2761 /* Completion QPT pages */
2762 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2763 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2764 mem_info->mem_type = BNA_MEM_T_DMA;
2765 mem_info->len = PAGE_SIZE;
2766 mem_info->num = cpage_count * q_cfg->num_paths;
2767
2768 /* Data QPTs */
2769 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2770 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2771 mem_info->mem_type = BNA_MEM_T_DMA;
2772 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2773 mem_info->num = q_cfg->num_paths;
2774
2775 /* Data s/w QPTs */
2776 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2777 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2778 mem_info->mem_type = BNA_MEM_T_KVA;
2779 mem_info->len = dpage_count * sizeof(void *);
2780 mem_info->num = q_cfg->num_paths;
2781
2782 /* Data QPT pages */
2783 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2784 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2785 mem_info->mem_type = BNA_MEM_T_DMA;
2786 mem_info->len = PAGE_SIZE;
2787 mem_info->num = dpage_count * q_cfg->num_paths;
2788
2789 /* Hdr QPTs */
2790 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2791 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2792 mem_info->mem_type = BNA_MEM_T_DMA;
2793 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2794 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2795
2796 /* Hdr s/w QPTs */
2797 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2798 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2799 mem_info->mem_type = BNA_MEM_T_KVA;
2800 mem_info->len = hpage_count * sizeof(void *);
2801 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2802
2803 /* Hdr QPT pages */
2804 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2805 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2806 mem_info->mem_type = BNA_MEM_T_DMA;
2807 mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2808 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2809
2810 /* RX Interrupts */
2811 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2812 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2813 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2814}
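
/*
 * Illustrative sketch, not part of the driver: the sizing logic in
 * bna_rx_res_req() above - round a queue depth up to a power of two, multiply
 * by the work-item size, align to the page size and convert to pages.  The
 * helper below is hypothetical; the trailing example assumes 64-byte work
 * items and 4 KiB pages.
 */
static unsigned int
example_q_pages(unsigned int depth, unsigned int wi_size, unsigned int page_size)
{
	unsigned int size;

	/* round depth up to the next power of two */
	depth--;
	depth |= depth >> 1;
	depth |= depth >> 2;
	depth |= depth >> 4;
	depth |= depth >> 8;
	depth |= depth >> 16;
	depth++;

	/* bytes needed, rounded up to whole pages (page_size must be 2^n) */
	size = depth * wi_size;
	size = (size + page_size - 1) & ~(page_size - 1);

	return size / page_size;	/* e.g. (1024, 64, 4096) -> 16 */
}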
2815
2816struct bna_rx *
2817bna_rx_create(struct bna *bna, struct bnad *bnad,
2818 struct bna_rx_config *rx_cfg,
2819 struct bna_rx_event_cbfn *rx_cbfn,
2820 struct bna_res_info *res_info,
2821 void *priv)
2822{
2823 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2824 struct bna_rx *rx;
2825 struct bna_rxp *rxp;
2826 struct bna_rxq *q0;
2827 struct bna_rxq *q1;
2828 struct bna_intr_info *intr_info;
2829 u32 page_count;
2830 struct bna_mem_descr *ccb_mem;
2831 struct bna_mem_descr *rcb_mem;
2832 struct bna_mem_descr *unmapq_mem;
2833 struct bna_mem_descr *cqpt_mem;
2834 struct bna_mem_descr *cswqpt_mem;
2835 struct bna_mem_descr *cpage_mem;
2836 struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
2837 struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
2838 struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
2839 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2840 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2841 struct bna_mem_descr *dpage_mem; /* data page mem */
2842 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
2843 int dpage_count, hpage_count, rcb_idx;
2844 struct bna_ib_config ibcfg;
2845 /* Fail if we don't have enough RXPs, RXQs */
2846 if (!_rx_can_satisfy(rx_mod, rx_cfg))
2847 return NULL;
2848
2849 /* Initialize resource pointers */
2850 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2851 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2852 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2853 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2854 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2855 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2856 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2857 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2858 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2859 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2860 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2861 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2862 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2863
2864 /* Compute q depth & page count */
2865 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2866 rx_cfg->num_paths;
2867
2868 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2869 rx_cfg->num_paths;
2870
2871 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2872 rx_cfg->num_paths;
2873 /* Get RX pointer */
2874 rx = _get_free_rx(rx_mod);
2875 _rx_init(rx, bna);
2876 rx->priv = priv;
2877 rx->type = rx_cfg->rx_type;
2878
2879 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2880 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2881 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2882 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2883 /* Following callbacks are mandatory */
2884 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2885 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2886
2887 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2888 switch (rx->type) {
2889 case BNA_RX_T_REGULAR:
2890 if (!(rx->bna->rx_mod.flags &
2891 BNA_RX_MOD_F_PORT_LOOPBACK))
2892 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2893 break;
2894 case BNA_RX_T_LOOPBACK:
2895 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2896 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2897 break;
2898 }
2899 }
2900
2901 for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2902 rxp = _get_free_rxp(rx_mod);
2903 rxp->type = rx_cfg->rxp_type;
2904 rxp->rx = rx;
2905 rxp->cq.rx = rx;
2906
2907 /* Get required RXQs, and queue them to rx-path */
2908 q0 = _get_free_rxq(rx_mod);
2909 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2910 q1 = NULL;
2911 else
2912 q1 = _get_free_rxq(rx_mod);
2913
2914 /* Initialize IB */
2915 if (1 == intr_info->num) {
2916 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2917 intr_info->intr_type,
2918 intr_info->idl[0].vector);
2919 rxp->vector = intr_info->idl[0].vector;
2920 } else {
2921 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2922 intr_info->intr_type,
2923 intr_info->idl[i].vector);
2924
2925 /* Map the MSI-x vector used for this RXP */
2926 rxp->vector = intr_info->idl[i].vector;
2927 }
2928
2929 rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2930
2931 ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2932 ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2933 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2934 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2935
2936 ret = bna_ib_config(rxp->cq.ib, &ibcfg);
2937
2938 /* Link rxqs to rxp */
2939 _rxp_add_rxqs(rxp, q0, q1);
2940
2941 /* Link rxp to rx */
2942 _rx_add_rxp(rx, rxp);
2943
2944 q0->rx = rx;
2945 q0->rxp = rxp;
2946
2947 /* Initialize RCB for the large / data q */
2948 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2949 RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2950 (void *)unmapq_mem[rcb_idx].kva);
2951 rcb_idx++;
2952 (q0)->rx_packets = (q0)->rx_bytes = 0;
2953 (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
2954
2955 /* Initialize RXQs */
2956 _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2957 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2958 q0->rcb->page_idx = dpage_idx;
2959 q0->rcb->page_count = dpage_count;
2960 dpage_idx += dpage_count;
2961
2962 /* Call bnad to complete rcb setup */
2963 if (rx->rcb_setup_cbfn)
2964 rx->rcb_setup_cbfn(bnad, q0->rcb);
2965
2966 if (q1) {
2967 q1->rx = rx;
2968 q1->rxp = rxp;
2969
2970 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2971 RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2972 (void *)unmapq_mem[rcb_idx].kva);
2973 rcb_idx++;
2974 (q1)->buffer_size = (rx_cfg)->small_buff_size;
2975 (q1)->rx_packets = (q1)->rx_bytes = 0;
2976 (q1)->rx_packets_with_error =
2977 (q1)->rxbuf_alloc_failed = 0;
2978
2979 _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2980 &hqpt_mem[i], &hsqpt_mem[i],
2981 &hpage_mem[hpage_idx]);
2982 q1->rcb->page_idx = hpage_idx;
2983 q1->rcb->page_count = hpage_count;
2984 hpage_idx += hpage_count;
2985
2986 /* Call bnad to complete rcb setup */
2987 if (rx->rcb_setup_cbfn)
2988 rx->rcb_setup_cbfn(bnad, q1->rcb);
2989 }
2990 /* Setup RXP::CQ */
2991 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2992 _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2993 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2994 rxp->cq.ccb->page_idx = cpage_idx;
2995 rxp->cq.ccb->page_count = page_count;
2996 cpage_idx += page_count;
2997
2998 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2999 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
3000
3001 rxp->cq.ccb->producer_index = 0;
3002 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
3003 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
3004 0 : rx_cfg->q_depth);
3005 rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
3006 rxp->cq.ccb->rcb[0] = q0->rcb;
3007 if (q1)
3008 rxp->cq.ccb->rcb[1] = q1->rcb;
3009 rxp->cq.ccb->cq = &rxp->cq;
3010 rxp->cq.ccb->bnad = bna->bnad;
3011 rxp->cq.ccb->hw_producer_index =
3012 ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3013 (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3014 *(rxp->cq.ccb->hw_producer_index) = 0;
3015 rxp->cq.ccb->intr_type = intr_info->intr_type;
3016 rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3017 intr_info->idl[0].vector :
3018 intr_info->idl[i].vector;
3019 rxp->cq.ccb->rx_coalescing_timeo =
3020 rxp->cq.ib->ib_config.coalescing_timeo;
3021 rxp->cq.ccb->id = i;
3022
3023 /* Call bnad to complete CCB setup */
3024 if (rx->ccb_setup_cbfn)
3025 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3026
3027 } /* for each rx-path */
3028
3029 bna_rxf_init(&rx->rxf, rx, rx_cfg);
3030
3031 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
3032
3033 return rx;
3034}
3035
3036void
3037bna_rx_destroy(struct bna_rx *rx)
3038{
3039 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3040 struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3041 struct bna_rxq *q0 = NULL;
3042 struct bna_rxq *q1 = NULL;
3043 struct bna_rxp *rxp;
3044 struct list_head *qe;
3045
3046 bna_rxf_uninit(&rx->rxf);
3047
3048 while (!list_empty(&rx->rxp_q)) {
3049 bfa_q_deq(&rx->rxp_q, &rxp);
3050 GET_RXQS(rxp, q0, q1);
3051 /* Callback to bnad for destroying RCB */
3052 if (rx->rcb_destroy_cbfn)
3053 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3054 q0->rcb = NULL;
3055 q0->rxp = NULL;
3056 q0->rx = NULL;
3057 _put_free_rxq(rx_mod, q0);
3058 if (q1) {
3059 /* Callback to bnad for destroying RCB */
3060 if (rx->rcb_destroy_cbfn)
3061 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3062 q1->rcb = NULL;
3063 q1->rxp = NULL;
3064 q1->rx = NULL;
3065 _put_free_rxq(rx_mod, q1);
3066 }
3067 rxp->rxq.slr.large = NULL;
3068 rxp->rxq.slr.small = NULL;
3069 if (rxp->cq.ib) {
3070 if (rxp->cq.ib_seg_offset != 0xff)
3071 bna_ib_release_idx(rxp->cq.ib,
3072 rxp->cq.ib_seg_offset);
3073 bna_ib_put(ib_mod, rxp->cq.ib);
3074 rxp->cq.ib = NULL;
3075 }
3076 /* Callback to bnad for destroying CCB */
3077 if (rx->ccb_destroy_cbfn)
3078 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3079 rxp->cq.ccb = NULL;
3080 rxp->rx = NULL;
3081 _put_free_rxp(rx_mod, rxp);
3082 }
3083
3084 list_for_each(qe, &rx_mod->rx_active_q) {
3085 if (qe == &rx->qe) {
3086 list_del(&rx->qe);
3087 bfa_q_qe_init(&rx->qe);
3088 break;
3089 }
3090 }
3091
3092 rx->bna = NULL;
3093 rx->priv = NULL;
3094 _put_free_rx(rx_mod, rx);
3095}
3096
3097void
3098bna_rx_enable(struct bna_rx *rx)
3099{
3100 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3101 return;
3102
3103 rx->rx_flags |= BNA_RX_F_ENABLE;
3104 if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3105 bfa_fsm_send_event(rx, RX_E_START);
3106}
3107
3108void
3109bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3110 void (*cbfn)(void *, struct bna_rx *,
3111 enum bna_cb_status))
3112{
3113 if (type == BNA_SOFT_CLEANUP) {
3114		/* h/w should not be accessed. Treat the Rx as already stopped */
3115 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3116 } else {
3117 rx->stop_cbfn = cbfn;
3118 rx->stop_cbarg = rx->bna->bnad;
3119
3120 rx->rx_flags &= ~BNA_RX_F_ENABLE;
3121
3122 bfa_fsm_send_event(rx, RX_E_STOP);
3123 }
3124}
3125
3126/**
3127 * TX
3128 */
3129#define call_tx_stop_cbfn(tx, status)\
3130do {\
3131 if ((tx)->stop_cbfn)\
3132 (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3133 (tx)->stop_cbfn = NULL;\
3134 (tx)->stop_cbarg = NULL;\
3135} while (0)
3136
3137#define call_tx_prio_change_cbfn(tx, status)\
3138do {\
3139 if ((tx)->prio_change_cbfn)\
3140 (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3141 (tx)->prio_change_cbfn = NULL;\
3142} while (0)
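
/*
 * Illustrative sketch, not part of the driver: the two macros above invoke a
 * completion callback exactly once and then disarm it, so a later state
 * transition cannot fire it again.  A hypothetical function form of the same
 * one-shot pattern:
 */
static void
example_call_once(void (**cbfn)(void *arg), void **cbarg)
{
	if (*cbfn)
		(*cbfn)(*cbarg);	/* notify the waiter */
	*cbfn = NULL;			/* disarm: cannot fire twice */
	*cbarg = NULL;
}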
3143
3144static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3145 enum bna_cb_status status);
3146static void bna_tx_cb_txq_stopped(void *arg, int status);
3147static void bna_tx_cb_stats_cleared(void *arg, int status);
3148static void __bna_tx_stop(struct bna_tx *tx);
3149static void __bna_tx_start(struct bna_tx *tx);
3150static void __bna_txf_stat_clr(struct bna_tx *tx);
3151
3152enum bna_tx_event {
3153 TX_E_START = 1,
3154 TX_E_STOP = 2,
3155 TX_E_FAIL = 3,
3156 TX_E_TXQ_STOPPED = 4,
3157 TX_E_PRIO_CHANGE = 5,
3158 TX_E_STAT_CLEARED = 6,
3159};
3160
3161enum bna_tx_state {
3162 BNA_TX_STOPPED = 1,
3163 BNA_TX_STARTED = 2,
3164 BNA_TX_TXQ_STOP_WAIT = 3,
3165 BNA_TX_PRIO_STOP_WAIT = 4,
3166 BNA_TX_STAT_CLR_WAIT = 5,
3167};
3168
3169bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3170 enum bna_tx_event);
3171bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3172 enum bna_tx_event);
3173bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3174 enum bna_tx_event);
3175bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3176 enum bna_tx_event);
3177bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3178 enum bna_tx_event);
3179
3180static struct bfa_sm_table tx_sm_table[] = {
3181 {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3182 {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3183 {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3184 {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3185 {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
3186};
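
/*
 * Illustrative sketch, not part of the driver: bna_tx_state_get() near the end
 * of this file maps the Tx object's current state-handler pointer back to a
 * BNA_TX_* value through tx_sm_table[].  The types below are hypothetical
 * stand-ins; the real lookup is bfa_sm_to_state() from bfa_sm.h.
 */
struct example_sm_entry {
	void (*handler)(void);	/* state-handler function */
	int state;		/* matching state enum value */
};

static int
example_sm_to_state(const struct example_sm_entry *tbl, int n, void (*cur)(void))
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].handler == cur)
			return tbl[i].state;
	return 0;	/* not found */
}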
3187
3188static void
3189bna_tx_sm_stopped_entry(struct bna_tx *tx)
3190{
3191 struct bna_txq *txq;
3192 struct list_head *qe;
3193
3194 list_for_each(qe, &tx->txq_q) {
3195 txq = (struct bna_txq *)qe;
3196 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3197 }
3198
3199 call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3200}
3201
3202static void
3203bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3204{
3205 switch (event) {
3206 case TX_E_START:
3207 bfa_fsm_set_state(tx, bna_tx_sm_started);
3208 break;
3209
3210 case TX_E_STOP:
3211 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3212 break;
3213
3214 case TX_E_FAIL:
3215 /* No-op */
3216 break;
3217
3218 case TX_E_PRIO_CHANGE:
3219 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3220 break;
3221
3222 case TX_E_TXQ_STOPPED:
3223 /**
3224		 * This event is received because the mailbox is flushed
3225		 * when the device fails
3226 */
3227 /* No-op */
3228 break;
3229
3230 default:
3231 bfa_sm_fault(tx->bna, event);
3232 }
3233}
3234
3235static void
3236bna_tx_sm_started_entry(struct bna_tx *tx)
3237{
3238 struct bna_txq *txq;
3239 struct list_head *qe;
3240
3241 __bna_tx_start(tx);
3242
3243 /* Start IB */
3244 list_for_each(qe, &tx->txq_q) {
3245 txq = (struct bna_txq *)qe;
3246 bna_ib_ack(&txq->ib->door_bell, 0);
3247 }
3248}
3249
3250static void
3251bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3252{
3253 struct bna_txq *txq;
3254 struct list_head *qe;
3255
3256 switch (event) {
3257 case TX_E_STOP:
3258 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3259 __bna_tx_stop(tx);
3260 break;
3261
3262 case TX_E_FAIL:
3263 list_for_each(qe, &tx->txq_q) {
3264 txq = (struct bna_txq *)qe;
3265 bna_ib_fail(txq->ib);
3266 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3267 }
3268 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3269 break;
3270
3271 case TX_E_PRIO_CHANGE:
3272 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3273 break;
3274
3275 default:
3276 bfa_sm_fault(tx->bna, event);
3277 }
3278}
3279
3280static void
3281bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3282{
3283}
3284
3285static void
3286bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3287{
3288 struct bna_txq *txq;
3289 struct list_head *qe;
3290
3291 switch (event) {
3292 case TX_E_FAIL:
3293 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3294 break;
3295
3296 case TX_E_TXQ_STOPPED:
3297 list_for_each(qe, &tx->txq_q) {
3298 txq = (struct bna_txq *)qe;
3299 bna_ib_stop(txq->ib);
3300 }
3301 bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3302 break;
3303
3304 case TX_E_PRIO_CHANGE:
3305 /* No-op */
3306 break;
3307
3308 default:
3309 bfa_sm_fault(tx->bna, event);
3310 }
3311}
3312
3313static void
3314bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3315{
3316 __bna_tx_stop(tx);
3317}
3318
3319static void
3320bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3321{
3322 struct bna_txq *txq;
3323 struct list_head *qe;
3324
3325 switch (event) {
3326 case TX_E_STOP:
3327 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3328 break;
3329
3330 case TX_E_FAIL:
3331 call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3332 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3333 break;
3334
3335 case TX_E_TXQ_STOPPED:
3336 list_for_each(qe, &tx->txq_q) {
3337 txq = (struct bna_txq *)qe;
3338 bna_ib_stop(txq->ib);
3339 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3340 }
3341 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3342 bfa_fsm_set_state(tx, bna_tx_sm_started);
3343 break;
3344
3345 case TX_E_PRIO_CHANGE:
3346 /* No-op */
3347 break;
3348
3349 default:
3350 bfa_sm_fault(tx->bna, event);
3351 }
3352}
3353
3354static void
3355bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3356{
3357 __bna_txf_stat_clr(tx);
3358}
3359
3360static void
3361bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3362{
3363 switch (event) {
3364 case TX_E_FAIL:
3365 case TX_E_STAT_CLEARED:
3366 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3367 break;
3368
3369 default:
3370 bfa_sm_fault(tx->bna, event);
3371 }
3372}
3373
3374static void
3375__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3376{
3377 struct bna_rxtx_q_mem *q_mem;
3378 struct bna_txq_mem txq_cfg;
3379 struct bna_txq_mem *txq_mem;
3380 struct bna_dma_addr cur_q_addr;
3381 u32 pg_num;
3382 void __iomem *base_addr;
3383 unsigned long off;
3384
3385 /* Fill out structure, to be subsequently written to hardware */
3386 txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3387 txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3388 cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3389 txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3390 txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3391
3392 txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3393
3394 txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3395 (txq->qpt.page_size >> 2);
3396 txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3397 ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3398
3399 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3400 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3401			(txq->priority & 0x7));
3402	txq_cfg.wvc_n_cquota_n_rquota =
3403 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3404 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
3405
3406 /* Setup the page and write to H/W */
3407
3408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3409 HQM_RXTX_Q_RAM_BASE_OFFSET);
3410 writel(pg_num, tx->bna->regs.page_addr);
3411
3412 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3413 HQM_RXTX_Q_RAM_BASE_OFFSET);
3414 q_mem = (struct bna_rxtx_q_mem *)0;
3415 txq_mem = &q_mem[txq->txq_id].txq;
3416
3417 /*
3418	 * The following four address writes are a workaround because the
3419	 * H/W needs to read these DMA addresses as little endian
3420 */
3421
3422 off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3423 writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3424
3425 off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3426 writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3427
3428 off = (unsigned long)&txq_mem->cur_q_entry_lo;
3429 writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3430
3431 off = (unsigned long)&txq_mem->cur_q_entry_hi;
3432 writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3433
3434 off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3435 writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3436
3437 off = (unsigned long)&txq_mem->entry_n_pg_size;
3438 writel(txq_cfg.entry_n_pg_size, base_addr + off);
3439
3440 off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3441 writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3442
3443 off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3444 writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3445
3446 off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3447 writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3448
3449 off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3450 writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3451
3452 txq->tcb->producer_index = 0;
3453 txq->tcb->consumer_index = 0;
3454 *(txq->tcb->hw_consumer_index) = 0;
3455
3456}
3457
3458static void
3459__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3460{
3461 struct bfi_ll_q_stop_req ll_req;
3462 u32 bit_mask[2] = {0, 0};
3463 if (txq->txq_id < 32)
3464 bit_mask[0] = (u32)1 << txq->txq_id;
3465 else
3466 bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3467
3468 memset(&ll_req, 0, sizeof(ll_req));
3469 ll_req.mh.msg_class = BFI_MC_LL;
3470 ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3471 ll_req.mh.mtag.h2i.lpu_id = 0;
3472 ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3473 ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3474
3475 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3476 bna_tx_cb_txq_stopped, tx);
3477
3478 bna_mbox_send(tx->bna, &tx->mbox_qe);
3479}
3480
3481static void
3482__bna_txf_start(struct bna_tx *tx)
3483{
3484 struct bna_tx_fndb_ram *tx_fndb;
3485 struct bna_txf *txf = &tx->txf;
3486 void __iomem *base_addr;
3487 unsigned long off;
3488
3489 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3490 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3491 tx->bna->regs.page_addr);
3492
3493 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3494 TX_FNDB_RAM_BASE_OFFSET);
3495
3496 tx_fndb = (struct bna_tx_fndb_ram *)0;
3497 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3498
3499 writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3500 base_addr + off);
3501
3502 if (tx->txf.txf_id < 32)
3503 tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
3504 else
3505 tx->bna->tx_mod.txf_bmap[1] |= ((u32)
3506 1 << (tx->txf.txf_id - 32));
3507}
3508
3509static void
3510__bna_txf_stop(struct bna_tx *tx)
3511{
3512 struct bna_tx_fndb_ram *tx_fndb;
3513 u32 page_num;
3514 u32 ctl_flags;
3515 struct bna_txf *txf = &tx->txf;
3516 void __iomem *base_addr;
3517 unsigned long off;
3518
3519 /* retrieve the running txf_flags & turn off enable bit */
3520 page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3521 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3522 writel(page_num, tx->bna->regs.page_addr);
3523
3524 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3525 TX_FNDB_RAM_BASE_OFFSET);
3526 tx_fndb = (struct bna_tx_fndb_ram *)0;
3527 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3528
3529 ctl_flags = readl(base_addr + off);
3530 ctl_flags &= ~BFI_TXF_CF_ENABLE;
3531
3532 writel(ctl_flags, base_addr + off);
3533
3534 if (tx->txf.txf_id < 32)
3535 tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
3536 else
3537		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
3538 1 << (tx->txf.txf_id - 32));
3539}
3540
3541static void
3542__bna_txf_stat_clr(struct bna_tx *tx)
3543{
3544 struct bfi_ll_stats_req ll_req;
3545 u32 txf_bmap[2] = {0, 0};
3546 if (tx->txf.txf_id < 32)
3547 txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
3548 else
3549 txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
3550 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3551 ll_req.stats_mask = 0;
3552 ll_req.rxf_id_mask[0] = 0;
3553 ll_req.rxf_id_mask[1] = 0;
3554 ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3555 ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3556
3557 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3558 bna_tx_cb_stats_cleared, tx);
3559 bna_mbox_send(tx->bna, &tx->mbox_qe);
3560}
3561
3562static void
3563__bna_tx_start(struct bna_tx *tx)
3564{
3565 struct bna_txq *txq;
3566 struct list_head *qe;
3567
3568 list_for_each(qe, &tx->txq_q) {
3569 txq = (struct bna_txq *)qe;
3570 bna_ib_start(txq->ib);
3571 __bna_txq_start(tx, txq);
3572 }
3573
3574 __bna_txf_start(tx);
3575
3576 list_for_each(qe, &tx->txq_q) {
3577 txq = (struct bna_txq *)qe;
3578 txq->tcb->priority = txq->priority;
3579 (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3580 }
3581}
3582
3583static void
3584__bna_tx_stop(struct bna_tx *tx)
3585{
3586 struct bna_txq *txq;
3587 struct list_head *qe;
3588
3589 list_for_each(qe, &tx->txq_q) {
3590 txq = (struct bna_txq *)qe;
3591 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3592 }
3593
3594 __bna_txf_stop(tx);
3595
3596 list_for_each(qe, &tx->txq_q) {
3597 txq = (struct bna_txq *)qe;
3598 bfa_wc_up(&tx->txq_stop_wc);
3599 }
3600
3601 list_for_each(qe, &tx->txq_q) {
3602 txq = (struct bna_txq *)qe;
3603 __bna_txq_stop(tx, txq);
3604 }
3605}
3606
3607static void
3608bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3609 struct bna_mem_descr *qpt_mem,
3610 struct bna_mem_descr *swqpt_mem,
3611 struct bna_mem_descr *page_mem)
3612{
3613 int i;
3614
3615 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3616 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3617 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3618 txq->qpt.page_count = page_count;
3619 txq->qpt.page_size = page_size;
3620
3621 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3622
3623 for (i = 0; i < page_count; i++) {
3624 txq->tcb->sw_qpt[i] = page_mem[i].kva;
3625
3626 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3627 page_mem[i].dma.lsb;
3628 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3629 page_mem[i].dma.msb;
3630
3631 }
3632}
3633
3634static void
3635bna_tx_free(struct bna_tx *tx)
3636{
3637 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3638 struct bna_txq *txq;
3639 struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3640 struct list_head *qe;
3641
3642 while (!list_empty(&tx->txq_q)) {
3643 bfa_q_deq(&tx->txq_q, &txq);
3644 bfa_q_qe_init(&txq->qe);
3645 if (txq->ib) {
3646 if (txq->ib_seg_offset != -1)
3647 bna_ib_release_idx(txq->ib,
3648 txq->ib_seg_offset);
3649 bna_ib_put(ib_mod, txq->ib);
3650 txq->ib = NULL;
3651 }
3652 txq->tcb = NULL;
3653 txq->tx = NULL;
3654 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3655 }
3656
3657 list_for_each(qe, &tx_mod->tx_active_q) {
3658 if (qe == &tx->qe) {
3659 list_del(&tx->qe);
3660 bfa_q_qe_init(&tx->qe);
3661 break;
3662 }
3663 }
3664
3665 tx->bna = NULL;
3666 tx->priv = NULL;
3667 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3668}
3669
3670static void
3671bna_tx_cb_txq_stopped(void *arg, int status)
3672{
3673 struct bna_tx *tx = (struct bna_tx *)arg;
3674
3675 bfa_q_qe_init(&tx->mbox_qe.qe);
3676 bfa_wc_down(&tx->txq_stop_wc);
3677}
3678
3679static void
3680bna_tx_cb_txq_stopped_all(void *arg)
3681{
3682 struct bna_tx *tx = (struct bna_tx *)arg;
3683
3684 bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3685}
3686
3687static void
3688bna_tx_cb_stats_cleared(void *arg, int status)
3689{
3690 struct bna_tx *tx = (struct bna_tx *)arg;
3691
3692 bfa_q_qe_init(&tx->mbox_qe.qe);
3693
3694 bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3695}
3696
3697static void
3698bna_tx_start(struct bna_tx *tx)
3699{
3700 tx->flags |= BNA_TX_F_PORT_STARTED;
3701 if (tx->flags & BNA_TX_F_ENABLED)
3702 bfa_fsm_send_event(tx, TX_E_START);
3703}
3704
3705static void
3706bna_tx_stop(struct bna_tx *tx)
3707{
3708 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3709 tx->stop_cbarg = &tx->bna->tx_mod;
3710
3711 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3712 bfa_fsm_send_event(tx, TX_E_STOP);
3713}
3714
3715static void
3716bna_tx_fail(struct bna_tx *tx)
3717{
3718 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3719 bfa_fsm_send_event(tx, TX_E_FAIL);
3720}
3721
3722static void
3723bna_tx_prio_changed(struct bna_tx *tx, int prio)
3724{
3725 struct bna_txq *txq;
3726 struct list_head *qe;
3727
3728 list_for_each(qe, &tx->txq_q) {
3729 txq = (struct bna_txq *)qe;
3730 txq->priority = prio;
3731 }
3732
3733 bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3734}
3735
3736static void
3737bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3738{
3739 if (cee_link)
3740 tx->flags |= BNA_TX_F_PRIO_LOCK;
3741 else
3742 tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3743}
3744
3745static void
3746bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3747 enum bna_cb_status status)
3748{
3749 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3750
3751 bfa_wc_down(&tx_mod->tx_stop_wc);
3752}
3753
3754static void
3755bna_tx_mod_cb_tx_stopped_all(void *arg)
3756{
3757 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3758
3759 if (tx_mod->stop_cbfn)
3760 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3761 tx_mod->stop_cbfn = NULL;
3762}
3763
3764void
3765bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3766{
3767 u32 q_size;
3768 u32 page_count;
3769 struct bna_mem_info *mem_info;
3770
3771 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3772 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3773 mem_info->mem_type = BNA_MEM_T_KVA;
3774 mem_info->len = sizeof(struct bna_tcb);
3775 mem_info->num = num_txq;
3776
3777 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3778 q_size = ALIGN(q_size, PAGE_SIZE);
3779 page_count = q_size >> PAGE_SHIFT;
3780
3781 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3782 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3783 mem_info->mem_type = BNA_MEM_T_DMA;
3784 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3785 mem_info->num = num_txq;
3786
3787 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3788 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3789 mem_info->mem_type = BNA_MEM_T_KVA;
3790 mem_info->len = page_count * sizeof(void *);
3791 mem_info->num = num_txq;
3792
3793 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3794 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3795 mem_info->mem_type = BNA_MEM_T_DMA;
3796 mem_info->len = PAGE_SIZE;
3797 mem_info->num = num_txq * page_count;
3798
3799 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3800 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3801 BNA_INTR_T_MSIX;
3802 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3803}
3804
3805struct bna_tx *
3806bna_tx_create(struct bna *bna, struct bnad *bnad,
3807 struct bna_tx_config *tx_cfg,
3808 struct bna_tx_event_cbfn *tx_cbfn,
3809 struct bna_res_info *res_info, void *priv)
3810{
3811 struct bna_intr_info *intr_info;
3812 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3813 struct bna_tx *tx;
3814 struct bna_txq *txq;
3815 struct list_head *qe;
3816 struct bna_ib_mod *ib_mod = &bna->ib_mod;
3817 struct bna_doorbell_qset *qset;
3818 struct bna_ib_config ib_config;
3819 int page_count;
3820 int page_size;
3821 int page_idx;
3822 int i;
3823 unsigned long off;
3824
3825 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3826 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3827 tx_cfg->num_txq;
3828 page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3829
3830 /**
3831 * Get resources
3832 */
3833
3834 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3835 return NULL;
3836
3837 /* Tx */
3838
3839 if (list_empty(&tx_mod->tx_free_q))
3840 return NULL;
3841 bfa_q_deq(&tx_mod->tx_free_q, &tx);
3842 bfa_q_qe_init(&tx->qe);
3843
3844 /* TxQs */
3845
3846 INIT_LIST_HEAD(&tx->txq_q);
3847 for (i = 0; i < tx_cfg->num_txq; i++) {
3848 if (list_empty(&tx_mod->txq_free_q))
3849 goto err_return;
3850
3851 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3852 bfa_q_qe_init(&txq->qe);
3853 list_add_tail(&txq->qe, &tx->txq_q);
3854 txq->ib = NULL;
3855 txq->ib_seg_offset = -1;
3856 txq->tx = tx;
3857 }
3858
3859 /* IBs */
3860 i = 0;
3861 list_for_each(qe, &tx->txq_q) {
3862 txq = (struct bna_txq *)qe;
3863
3864 if (intr_info->num == 1)
3865 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3866 intr_info->idl[0].vector);
3867 else
3868 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3869 intr_info->idl[i].vector);
3870
3871 if (txq->ib == NULL)
3872 goto err_return;
3873
3874 txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
3875 if (txq->ib_seg_offset == -1)
3876 goto err_return;
3877
3878 i++;
3879 }
3880
3881 /*
3882 * Initialize
3883 */
3884
3885 /* Tx */
3886
3887 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3888 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3889 /* Following callbacks are mandatory */
3890 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3891 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3892 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3893
3894 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3895 tx->bna = bna;
3896 tx->priv = priv;
	tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
	tx->txq_stop_wc.wc_cbarg = tx;
	tx->txq_stop_wc.wc_count = 0;

	tx->type = tx_cfg->tx_type;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_PORT_LOOPBACK))
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		}
	}
	if (tx->bna->tx_mod.cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;

	/* TxQ */

	i = 0;
	page_idx = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = tx_mod->priority;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */

		ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
		ib_config.interpkt_timeo = 0; /* Not used */
		ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
		ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
					BFI_IB_CF_INT_ENABLE |
					BFI_IB_CF_COALESCING_MODE);
		bna_ib_config(txq->ib, &ib_config);

		/* TCB */

		txq->tcb->producer_index = 0;
		txq->tcb->consumer_index = 0;
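		/*
		 * hw_consumer_index points into this TxQ's slot of the IB
		 * index segment, which the adapter updates as it consumes
		 * Tx entries; hence the volatile access.
		 */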
		txq->tcb->hw_consumer_index = (volatile u32 *)
			((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
			(txq->ib_seg_offset * BFI_IBIDX_SIZE));
		*(txq->tcb->hw_consumer_index) = 0;
		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
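		/*
		 * Compute this TxQ's doorbell offset as the member offset
		 * within struct bna_doorbell_qset (taken from a NULL base),
		 * then add it to the mapped doorbell BAR address.
		 */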
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)&qset[txq->txq_id].txq[0];
		txq->tcb->q_dbell = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
		txq->tcb->i_dbell = &txq->ib->door_bell;
		txq->tcb->intr_type = intr_info->intr_type;
		txq->tcb->intr_vector = (intr_info->num == 1) ?
				intr_info->idl[0].vector :
				intr_info->idl[i].vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		i++;
	}

	/* TxF */

	tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
	tx->txf.vlan = 0;

	/* Mbox element */
	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}

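/**
 * bna_tx_destroy - tear down a Tx object created by bna_tx_create()
 * @tx: the Tx object to destroy
 *
 * Invokes the optional tcb_destroy callback for every TxQ, then hands
 * the Tx and its TxQ/IB resources back via bna_tx_free().
 */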
void
bna_tx_destroy(struct bna_tx *tx)
{
	/* Callback to bnad for destroying TCB */
	if (tx->tcb_destroy_cbfn) {
		struct bna_txq *txq;
		struct list_head *qe;

		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
		}
	}

	bna_tx_free(tx);
}

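/**
 * bna_tx_enable - mark a Tx as enabled and start it if the port allows
 * @tx: the Tx object to enable
 *
 * Does nothing unless the Tx is currently in the stopped state. The Tx
 * is started immediately only if BNA_TX_F_PORT_STARTED is already set;
 * otherwise only BNA_TX_F_ENABLED is recorded.
 */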
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_PORT_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

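/**
 * bna_tx_disable - disable a Tx and notify the caller when it has stopped
 * @tx: the Tx object to disable
 * @type: BNA_SOFT_CLEANUP completes immediately without stopping the
 *	hardware path; any other type performs a full stop
 * @cbfn: completion callback, invoked with the bnad pointer, the Tx and
 *	a status code
 */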
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

int
bna_tx_state_get(struct bna_tx *tx)
{
	return bfa_sm_to_state(tx_sm_table, tx->fsm);
}

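/**
 * bna_tx_mod_init - set up the Tx module's object pools
 * @tx_mod: Tx module to initialize
 * @bna: parent BNA instance
 * @res_info: memory resources holding the Tx and TxQ object arrays
 *
 * Places every Tx and TxQ object on its free list and prepares the wait
 * counter used when stopping all Tx objects.
 */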
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < BFI_MAX_TXQ; i++) {
		tx_mod->tx[i].txf.txf_id = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);

		tx_mod->txq[i].txq_id = i;
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
	tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
	tx_mod->tx_stop_wc.wc_count = 0;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

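	/*
	 * These walks only count the free Tx and TxQ objects; the counts
	 * are not used further.
	 */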
	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

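/**
 * bna_tx_mod_start - port is up; start all active Tx objects of a type
 * @tx_mod: Tx module
 * @type: BNA_TX_T_REGULAR or BNA_TX_T_LOOPBACK
 */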
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

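/**
 * bna_tx_mod_stop - port is going down; stop all active Tx objects of a type
 * @tx_mod: Tx module
 * @type: BNA_TX_T_REGULAR or BNA_TX_T_LOOPBACK
 *
 * Uses the tx_stop_wc wait counter so that bna_port_cb_tx_stopped() is
 * called back exactly once, after the last matching Tx has stopped (or
 * immediately, if no Tx of the given type is active).
 */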
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	tx_mod->stop_cbfn = bna_port_cb_tx_stopped;

	/*
	 * Before calling bna_tx_stop(), increment tx_stop_wc once for
	 * every Tx that bna_tx_stop() will be called on.
	 */
	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bfa_wc_up(&tx_mod->tx_stop_wc);
	}

	if (tx_mod->tx_stop_wc.wc_count == 0) {
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
		tx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_stop(tx);
	}
}

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
	struct bna_tx *tx;
	struct list_head *qe;

	if (prio != tx_mod->priority) {
		tx_mod->priority = prio;

		list_for_each(qe, &tx_mod->tx_active_q) {
			tx = (struct bna_tx *)qe;
			bna_tx_prio_changed(tx, prio);
		}
	}
}

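/**
 * bna_tx_mod_cee_link_status - propagate a CEE link state change
 * @tx_mod: Tx module
 * @cee_link: new CEE link state
 *
 * Records the new state and forwards it to every active Tx.
 */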
void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->cee_link = cee_link;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_cee_link_status(tx, cee_link);
	}
}