1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#include <linux/etherdevice.h>
15
16#include "vxge-traffic.h"
17#include "vxge-config.h"
18#include "vxge-main.h"
19
20/*
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle.
23 *
24 * Enable vpath interrupts. The function is to be executed last in the
25 * vpath initialization sequence.
26 *
27 * See also: vxge_hw_vpath_intr_disable()
28 */
29enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
30{
31 u64 val64;
32
33 struct __vxge_hw_virtualpath *vpath;
34 struct vxge_hw_vpath_reg __iomem *vp_reg;
35 enum vxge_hw_status status = VXGE_HW_OK;
36 if (vp == NULL) {
37 status = VXGE_HW_ERR_INVALID_HANDLE;
38 goto exit;
39 }
40
41 vpath = vp->vpath;
42
43 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45 goto exit;
46 }
47
48 vp_reg = vpath->vp_reg;
49
50 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51
52 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 &vp_reg->general_errors_reg);
54
55 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 &vp_reg->pci_config_errors_reg);
57
58 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 &vp_reg->mrpcim_to_vpath_alarm_reg);
60
61 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 &vp_reg->srpcim_to_vpath_alarm_reg);
63
64 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 &vp_reg->vpath_ppif_int_status);
66
67 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 &vp_reg->srpcim_msg_to_vpath_reg);
69
70 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 &vp_reg->vpath_pcipif_int_status);
72
73 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 &vp_reg->prc_alarm_reg);
75
76 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 &vp_reg->wrdma_alarm_status);
78
79 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 &vp_reg->asic_ntwk_vp_err_reg);
81
82 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 &vp_reg->xgmac_vp_int_status);
84
85 val64 = readq(&vp_reg->vpath_general_int_status);
86
87 /* Mask unwanted interrupts */
88
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->vpath_pcipif_int_mask);
91
92 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 &vp_reg->srpcim_msg_to_vpath_mask);
94
95 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 &vp_reg->srpcim_to_vpath_alarm_mask);
97
98 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 &vp_reg->mrpcim_to_vpath_alarm_mask);
100
101 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 &vp_reg->pci_config_errors_mask);
103
104 /* Unmask the individual interrupts */
105
106 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 &vp_reg->general_errors_mask);
111
112 __vxge_hw_pio_mem_write32_upper(
113 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 &vp_reg->kdfcctl_errors_mask);
120
121 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122
123 __vxge_hw_pio_mem_write32_upper(
124 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 &vp_reg->prc_alarm_mask);
126
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
131 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 &vp_reg->asic_ntwk_vp_err_mask);
133 else
134 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 &vp_reg->asic_ntwk_vp_err_mask);
138
139 __vxge_hw_pio_mem_write32_upper(0,
140 &vp_reg->vpath_general_int_mask);
141exit:
142 return status;
143
144}
145
146/*
147 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148 * @vp: Virtual Path handle.
149 *
150 * Disable vpath interrupts. The function is to be executed before the
151 * vpath is reset or closed.
152 *
153 * See also: vxge_hw_vpath_intr_enable()
154 */
155enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
157{
158 u64 val64;
159
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 if (vp == NULL) {
164 status = VXGE_HW_ERR_INVALID_HANDLE;
165 goto exit;
166 }
167
168 vpath = vp->vpath;
169
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172 goto exit;
173 }
174 vp_reg = vpath->vp_reg;
175
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
179
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
186
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
189
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
195
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
198
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
201
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
204
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
207
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
210
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
213
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
216
217exit:
218 return status;
219}
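
/*
 * Usage sketch (illustrative only, not taken from the driver sources): the
 * enable/disable pair above is normally called with the handle returned by
 * vxge_hw_vpath_open(); "vp_handle" below is a hypothetical local variable.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_intr_enable(vp_handle);
 *	if (status != VXGE_HW_OK)
 *		goto fail;
 *	...
 *	vxge_hw_vpath_intr_disable(vp_handle);
 */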
220
221/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel handle (rx or tx)
224 * @msix_id: MSIX ID
225 *
226 * The function masks the MSIX interrupt for the given msix_id
227 *
228 * Returns: none
229 */
230void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{
232
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237
238 return;
239}
240
241/**
242 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
243 * @channel: Channel handle (rx or tx)
244 * @msix_id: MSIX ID
245 *
246 * The function unmasks the MSIX interrupt for the given msix_id
247 *
248 * Returns: none
249 */
250void
251vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{
253
254 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258
259 return;
260}
261
262/**
263 * vxge_hw_device_set_intr_type - Updates the configuration
264 * with new interrupt type.
265 * @hldev: HW device handle.
266 * @intr_mode: New interrupt type
267 */
268u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
269{
270
271 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
272 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
274 (intr_mode != VXGE_HW_INTR_MODE_DEF))
275 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
276
277 hldev->config.intr_mode = intr_mode;
278 return intr_mode;
279}
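
/*
 * Usage sketch (illustrative only): the interrupt mode is normally selected
 * before the interrupt path is brought up; an unsupported value silently
 * falls back to INTA, as implemented above.
 *
 *	u32 mode;
 *
 *	mode = vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	if (mode != VXGE_HW_INTR_MODE_MSIX)
 *		pr_info("vxge: falling back to INTA mode\n");
 */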
280
281/**
282 * vxge_hw_device_intr_enable - Enable interrupts.
283 * @hldev: HW device handle.
284 *
285 * Enable Titan interrupts. The function is to be executed last in the
286 * Titan initialization sequence.
289 *
290 * See also: vxge_hw_device_intr_disable()
291 */
292void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
293{
294 u32 i;
295 u64 val64;
296 u32 val32;
297
298 vxge_hw_device_mask_all(hldev);
299
300 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
301
302 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
303 continue;
304
305 vxge_hw_vpath_intr_enable(
306 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
307 }
308
309 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
310 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
311 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
312
313 if (val64 != 0) {
314 writeq(val64, &hldev->common_reg->tim_int_status0);
315
316 writeq(~val64, &hldev->common_reg->tim_int_mask0);
317 }
318
319 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
320 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
321
322 if (val32 != 0) {
323 __vxge_hw_pio_mem_write32_upper(val32,
324 &hldev->common_reg->tim_int_status1);
325
326 __vxge_hw_pio_mem_write32_upper(~val32,
327 &hldev->common_reg->tim_int_mask1);
328 }
329 }
330
331 val64 = readq(&hldev->common_reg->titan_general_int_status);
332
333 vxge_hw_device_unmask_all(hldev);
334
335 return;
336}
337
338/**
339 * vxge_hw_device_intr_disable - Disable Titan interrupts.
340 * @hldev: HW device handle.
343 *
344 * Disable Titan interrupts.
345 *
346 * See also: vxge_hw_device_intr_enable()
347 */
348void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
349{
350 u32 i;
351
352 vxge_hw_device_mask_all(hldev);
353
354 /* mask all the tim interrupts */
355 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
356 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
357 &hldev->common_reg->tim_int_mask1);
358
359 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
360
361 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
362 continue;
363
364 vxge_hw_vpath_intr_disable(
365 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
366 }
367
368 return;
369}
370
371/**
372 * vxge_hw_device_mask_all - Mask all device interrupts.
373 * @hldev: HW device handle.
374 *
375 * Mask all device interrupts.
376 *
377 * See also: vxge_hw_device_unmask_all()
378 */
379void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
380{
381 u64 val64;
382
383 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
384 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
385
386 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
387 &hldev->common_reg->titan_mask_all_int);
388
389 return;
390}
391
392/**
393 * vxge_hw_device_unmask_all - Unmask all device interrupts.
394 * @hldev: HW device handle.
395 *
396 * Unmask all device interrupts.
397 *
398 * See also: vxge_hw_device_mask_all()
399 */
400void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
401{
402 u64 val64 = 0;
403
404 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
405 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
406
407 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
408 &hldev->common_reg->titan_mask_all_int);
409
410 return;
411}
412
413/**
414 * vxge_hw_device_flush_io - Flush io writes.
415 * @hldev: HW device handle.
416 *
417 * The function performs a read operation to flush io writes.
418 *
419 * Returns: void
420 */
421void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
422{
423 u32 val32;
424
425 val32 = readl(&hldev->common_reg->titan_general_int_status);
426}
427
428/**
429 * vxge_hw_device_begin_irq - Begin IRQ processing.
430 * @hldev: HW device handle.
431 * @skip_alarms: Do not clear the alarms
432 * @reason: "Reason" for the interrupt, the value of Titan's
433 * general_int_status register.
434 *
435 * The function performs two actions: it first checks whether (in the shared
436 * IRQ case) the interrupt was raised by the device; next, it masks the device interrupts.
437 *
438 * Note:
439 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
440 * bridge. Therefore, two back-to-back interrupts are potentially possible.
441 *
442 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that in
443 * this case the device remains enabled).
444 * Otherwise, vxge_hw_device_begin_irq() returns the 64bit general adapter
445 * status through @reason.
446 */
447enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
448 u32 skip_alarms, u64 *reason)
449{
450 u32 i;
451 u64 val64;
452 u64 adapter_status;
453 u64 vpath_mask;
454 enum vxge_hw_status ret = VXGE_HW_OK;
455
456 val64 = readq(&hldev->common_reg->titan_general_int_status);
457
458 if (unlikely(!val64)) {
459 /* not Titan interrupt */
460 *reason = 0;
461 ret = VXGE_HW_ERR_WRONG_IRQ;
462 goto exit;
463 }
464
465 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
466
467 adapter_status = readq(&hldev->common_reg->adapter_status);
468
469 if (adapter_status == VXGE_HW_ALL_FOXES) {
470
471 __vxge_hw_device_handle_error(hldev,
472 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
473 *reason = 0;
474 ret = VXGE_HW_ERR_SLOT_FREEZE;
475 goto exit;
476 }
477 }
478
479 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
480
481 *reason = val64;
482
483 vpath_mask = hldev->vpaths_deployed >>
484 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
485
486 if (val64 &
487 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
488 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
489
490 return VXGE_HW_OK;
491 }
492
493 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
494
495 if (unlikely(val64 &
496 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
497
498 enum vxge_hw_status error_level = VXGE_HW_OK;
499
500 hldev->stats.sw_dev_err_stats.vpath_alarms++;
501
502 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
503
504 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
505 continue;
506
507 ret = __vxge_hw_vpath_alarm_process(
508 &hldev->virtual_paths[i], skip_alarms);
509
510 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
511
512 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
513 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
514 break;
515 }
516
517 ret = error_level;
518 }
519exit:
520 return ret;
521}
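
/*
 * ISR sketch (illustrative only, not the driver's actual handler): a minimal
 * INTA handler built on the helpers in this file.  Completion processing is
 * elided; the real driver defers it to NAPI.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *			return IRQ_NONE;
 *
 *		vxge_hw_device_mask_all(hldev);
 *		(process Rx/Tx completions here, then re-arm)
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		vxge_hw_device_unmask_all(hldev);
 *		vxge_hw_device_flush_io(hldev);
 *		return IRQ_HANDLED;
 *	}
 */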
522
523/*
524 * __vxge_hw_device_handle_link_up_ind
525 * @hldev: HW device handle.
526 *
527 * Link up indication handler. The function is invoked by HW when
528 * Titan indicates that the link is up for a programmable amount of time.
529 */
530enum vxge_hw_status
531__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
532{
533 /*
534 * If the link state is already up, return.
535 */
536 if (hldev->link_state == VXGE_HW_LINK_UP)
537 goto exit;
538
539 hldev->link_state = VXGE_HW_LINK_UP;
540
541 /* notify driver */
542 if (hldev->uld_callbacks.link_up)
543 hldev->uld_callbacks.link_up(hldev);
544exit:
545 return VXGE_HW_OK;
546}
547
548/*
549 * __vxge_hw_device_handle_link_down_ind
550 * @hldev: HW device handle.
551 *
552 * Link down indication handler. The function is invoked by HW when
553 * Titan indicates that the link is down.
554 */
555enum vxge_hw_status
556__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
557{
558 /*
559 * If the link state is already down, return.
560 */
561 if (hldev->link_state == VXGE_HW_LINK_DOWN)
562 goto exit;
563
564 hldev->link_state = VXGE_HW_LINK_DOWN;
565
566 /* notify driver */
567 if (hldev->uld_callbacks.link_down)
568 hldev->uld_callbacks.link_down(hldev);
569exit:
570 return VXGE_HW_OK;
571}
572
573/**
574 * __vxge_hw_device_handle_error - Handle error
575 * @hldev: HW device
576 * @vp_id: Vpath Id
577 * @type: Error type. Please see enum vxge_hw_event{}
578 *
579 * Handle error.
580 */
581enum vxge_hw_status
582__vxge_hw_device_handle_error(
583 struct __vxge_hw_device *hldev,
584 u32 vp_id,
585 enum vxge_hw_event type)
586{
587 switch (type) {
588 case VXGE_HW_EVENT_UNKNOWN:
589 break;
590 case VXGE_HW_EVENT_RESET_START:
591 case VXGE_HW_EVENT_RESET_COMPLETE:
592 case VXGE_HW_EVENT_LINK_DOWN:
593 case VXGE_HW_EVENT_LINK_UP:
594 goto out;
595 case VXGE_HW_EVENT_ALARM_CLEARED:
596 goto out;
597 case VXGE_HW_EVENT_ECCERR:
598 case VXGE_HW_EVENT_MRPCIM_ECCERR:
599 goto out;
600 case VXGE_HW_EVENT_FIFO_ERR:
601 case VXGE_HW_EVENT_VPATH_ERR:
602 case VXGE_HW_EVENT_CRITICAL_ERR:
603 case VXGE_HW_EVENT_SERR:
604 break;
605 case VXGE_HW_EVENT_SRPCIM_SERR:
606 case VXGE_HW_EVENT_MRPCIM_SERR:
607 goto out;
608 case VXGE_HW_EVENT_SLOT_FREEZE:
609 break;
610 default:
611 vxge_assert(0);
612 goto out;
613 }
614
615 /* notify driver */
616 if (hldev->uld_callbacks.crit_err)
617 hldev->uld_callbacks.crit_err(
618 (struct __vxge_hw_device *)hldev,
619 type, vp_id);
620out:
621
622 return VXGE_HW_OK;
623}
624
625/**
626 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
627 * condition that has caused the Tx and Rx interrupt.
628 * @hldev: HW device.
629 *
630 * Acknowledge (that is, clear) the condition that has caused
631 * the Tx and Rx interrupt.
632 * See also: vxge_hw_device_begin_irq(),
633 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
634 */
635void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
636{
637
638 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
639 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
640 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
641 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
642 &hldev->common_reg->tim_int_status0);
643 }
644
645 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
647 __vxge_hw_pio_mem_write32_upper(
648 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
649 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
650 &hldev->common_reg->tim_int_status1);
651 }
652
653 return;
654}
655
656/*
657 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
658 * @channel: Channel
659 * @dtrh: Buffer to return the DTR pointer
660 *
661 * Allocates a dtr from the reserve array. If the reserve array is empty,
662 * it swaps the reserve and free arrays.
663 *
664 */
665enum vxge_hw_status
666vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
667{
668 void **tmp_arr;
669
670 if (channel->reserve_ptr - channel->reserve_top > 0) {
671_alloc_after_swap:
672 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
673
674 return VXGE_HW_OK;
675 }
676
677 /* switch between empty and full arrays */
678
679 /* the idea behind such a design is that by having free and reserved
680 * arrays separated we basically separated irq and non-irq parts.
681 * i.e. no additional lock need to be done when we free a resource */
682
683 if (channel->length - channel->free_ptr > 0) {
684
685 tmp_arr = channel->reserve_arr;
686 channel->reserve_arr = channel->free_arr;
687 channel->free_arr = tmp_arr;
688 channel->reserve_ptr = channel->length;
689 channel->reserve_top = channel->free_ptr;
690 channel->free_ptr = channel->length;
691
692 channel->stats->reserve_free_swaps_cnt++;
693
694 goto _alloc_after_swap;
695 }
696
697 channel->stats->full_cnt++;
698
699 *dtrh = NULL;
700 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
701}
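
/*
 * Worked example (illustrative): with length == 4, once all four DTRs have
 * been reserved, posted, completed and then returned through
 * vxge_hw_channel_dtr_free(), free_ptr has walked down to 0 while
 * reserve_ptr == reserve_top.  The next vxge_hw_channel_dtr_alloc() call
 * swaps the two arrays, making the four freed DTRs reservable again without
 * any locking between the irq-side (free) and non-irq-side (reserve) paths.
 */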
702
703/*
704 * vxge_hw_channel_dtr_post - Post a dtr to the channel
705 * @channelh: Channel
706 * @dtrh: DTR pointer
707 *
708 * Posts a dtr to work array.
709 *
710 */
711void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
712{
713 vxge_assert(channel->work_arr[channel->post_index] == NULL);
714
715 channel->work_arr[channel->post_index++] = dtrh;
716
717 /* wrap-around */
718 if (channel->post_index == channel->length)
719 channel->post_index = 0;
720}
721
722/*
723 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
724 * @channel: Channel
725 * @dtr: Buffer to return the next completed DTR pointer
726 *
727 * Returns the next completed dtr without removing it from the work array
728 *
729 */
730void
731vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
732{
733 vxge_assert(channel->compl_index < channel->length);
734
735 *dtrh = channel->work_arr[channel->compl_index];
736 prefetch(*dtrh);
737}
738
739/*
740 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
741 * @channel: Channel handle
742 *
743 * Removes the next completed dtr from work array
744 *
745 */
746void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
747{
748 channel->work_arr[channel->compl_index] = NULL;
749
750 /* wrap-around */
751 if (++channel->compl_index == channel->length)
752 channel->compl_index = 0;
753
754 channel->stats->total_compl_cnt++;
755}
756
757/*
758 * vxge_hw_channel_dtr_free - Frees a dtr
759 * @channel: Channel handle
760 * @dtr: DTR pointer
761 *
762 * Returns the dtr to free array
763 *
764 */
765void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
766{
767 channel->free_arr[--channel->free_ptr] = dtrh;
768}
769
770/*
771 * vxge_hw_channel_dtr_count
772 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
773 *
774 * Retrieve the number of DTRs available. This function cannot be called
775 * from the data path. ring_initial_replenishi() is the only user.
776 */
777int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
778{
779 return (channel->reserve_ptr - channel->reserve_top) +
780 (channel->length - channel->free_ptr);
781}
782
783/**
784 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
785 * @ring: Handle to the ring object used for receive
786 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
787 * with a valid handle.
788 *
789 * Reserve Rx descriptor for the subsequent filling-in driver
790 * and posting on the corresponding channel (@channelh)
791 * via vxge_hw_ring_rxd_post().
792 *
793 * Returns: VXGE_HW_OK - success.
794 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
795 *
796 */
797enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
798 void **rxdh)
799{
800 enum vxge_hw_status status;
801 struct __vxge_hw_channel *channel;
802
803 channel = &ring->channel;
804
805 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
806
807 if (status == VXGE_HW_OK) {
808 struct vxge_hw_ring_rxd_1 *rxdp =
809 (struct vxge_hw_ring_rxd_1 *)*rxdh;
810
811 rxdp->control_0 = rxdp->control_1 = 0;
812 }
813
814 return status;
815}
816
817/**
818 * vxge_hw_ring_rxd_free - Free descriptor.
819 * @ring: Handle to the ring object used for receive
820 * @rxdh: Descriptor handle.
821 *
822 * Free the reserved descriptor. This operation is "symmetrical" to
823 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
824 * lifecycle.
825 *
826 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
827 * be:
828 *
829 * - reserved (vxge_hw_ring_rxd_reserve);
830 *
831 * - posted (vxge_hw_ring_rxd_post);
832 *
833 * - completed (vxge_hw_ring_rxd_next_completed);
834 *
835 * - and recycled again (vxge_hw_ring_rxd_free).
836 *
837 * For alternative state transitions and more details please refer to
838 * the design doc.
839 *
840 */
841void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
842{
843 struct __vxge_hw_channel *channel;
844
845 channel = &ring->channel;
846
847 vxge_hw_channel_dtr_free(channel, rxdh);
848
849}
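
/*
 * Refill sketch (illustrative only): reserving and posting one receive
 * descriptor.  vxge_hw_ring_rxd_1b_set() is assumed to be the one-buffer
 * helper declared in vxge-traffic.h; "dma_addr" and "buf_size" stand for a
 * buffer the caller has already DMA-mapped.
 *
 *	void *rxdh;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */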
850
851/**
852 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
853 * @ring: Handle to the ring object used for receive
854 * @rxdh: Descriptor handle.
855 *
856 * This routine prepares a rxd and posts
857 */
858void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
859{
860 struct __vxge_hw_channel *channel;
861
862 channel = &ring->channel;
863
864 vxge_hw_channel_dtr_post(channel, rxdh);
865}
866
867/**
868 * vxge_hw_ring_rxd_post_post - Process rxd after post.
869 * @ring: Handle to the ring object used for receive
870 * @rxdh: Descriptor handle.
871 *
872 * Processes rxd after post
873 */
874void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
875{
876 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
877 struct __vxge_hw_channel *channel;
878
879 channel = &ring->channel;
880
881 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
882
883 if (ring->stats->common_stats.usage_cnt > 0)
884 ring->stats->common_stats.usage_cnt--;
885}
886
887/**
888 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
889 * @ring: Handle to the ring object used for receive
890 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
891 *
892 * Post descriptor on the ring.
893 * Prior to posting the descriptor should be filled in accordance with
894 * Host/Titan interface specification for a given service (LL, etc.).
895 *
896 */
897void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
898{
899 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
900 struct __vxge_hw_channel *channel;
901
902 channel = &ring->channel;
903
904 wmb();
905 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
906
907 vxge_hw_channel_dtr_post(channel, rxdh);
908
909 if (ring->stats->common_stats.usage_cnt > 0)
910 ring->stats->common_stats.usage_cnt--;
911}
912
913/**
914 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
915 * @ring: Handle to the ring object used for receive
916 * @rxdh: Descriptor handle.
917 *
918 * Processes rxd after post with memory barrier.
919 */
920void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
921{
922 struct __vxge_hw_channel *channel;
923
924 channel = &ring->channel;
925
926 wmb();
927 vxge_hw_ring_rxd_post_post(ring, rxdh);
928}
929
930/**
931 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
932 * @ring: Handle to the ring object used for receive
933 * @rxdh: Descriptor handle. Returned by HW.
934 * @t_code: Transfer code, as per Titan User Guide,
935 * Receive Descriptor Format. Returned by HW.
936 *
937 * Retrieve the _next_ completed descriptor.
938 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
939 * driver of new completed descriptors. After that
940 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
941 * of the completions (the very first completion is passed by HW via
942 * vxge_hw_ring_callback_f).
943 *
944 * Implementation-wise, the driver is free to call
945 * vxge_hw_ring_rxd_next_completed either immediately from inside the
946 * ring callback, or in a deferred fashion and separate (from HW)
947 * context.
948 *
949 * Non-zero @t_code means failure to fill-in receive buffer(s)
950 * of the descriptor.
951 * For instance, parity error detected during the data transfer.
952 * In this case Titan will complete the descriptor and indicate
953 * for the host that the received data is not to be used.
954 * For details please refer to Titan User Guide.
955 *
956 * Returns: VXGE_HW_OK - success.
957 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
958 * are currently available for processing.
959 *
960 * See also: vxge_hw_ring_callback_f{},
961 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
962 */
963enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
964 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
965{
966 struct __vxge_hw_channel *channel;
967 struct vxge_hw_ring_rxd_1 *rxdp;
968 enum vxge_hw_status status = VXGE_HW_OK;
969 u64 control_0, own;
970
971 channel = &ring->channel;
972
973 vxge_hw_channel_dtr_try_complete(channel, rxdh);
974
975 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
976 if (rxdp == NULL) {
977 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
978 goto exit;
979 }
980
981 control_0 = rxdp->control_0;
982 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
983 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
984
985 /* check whether it is not the end */
986 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
987
988 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
989 0);
990
991 ++ring->cmpl_cnt;
992 vxge_hw_channel_dtr_complete(channel);
993
994 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
995
996 ring->stats->common_stats.usage_cnt++;
997 if (ring->stats->common_stats.usage_max <
998 ring->stats->common_stats.usage_cnt)
999 ring->stats->common_stats.usage_max =
1000 ring->stats->common_stats.usage_cnt;
1001
1002 status = VXGE_HW_OK;
1003 goto exit;
1004 }
1005
1006 /* reset it. since we don't want to return
1007 * garbage to the driver */
1008 *rxdh = NULL;
1009 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1010exit:
1011 return status;
1012}
1013
1014/**
1015 * vxge_hw_ring_handle_tcode - Handle transfer code.
1016 * @ring: Handle to the ring object used for receive
1017 * @rxdh: Descriptor handle.
1018 * @t_code: One of the enumerated (and documented in the Titan user guide)
1019 * "transfer codes".
1020 *
1021 * Handle descriptor's transfer code. The latter comes with each completed
1022 * descriptor.
1023 *
1024 * Returns: one of the enum vxge_hw_status{} enumerated types.
1025 * VXGE_HW_OK - for success.
1026 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1027 */
1028enum vxge_hw_status vxge_hw_ring_handle_tcode(
1029 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1030{
1031 struct __vxge_hw_channel *channel;
1032 enum vxge_hw_status status = VXGE_HW_OK;
1033
1034 channel = &ring->channel;
1035
1036 /* If the t_code is not supported and if the
1037 * t_code is other than 0x5 (unparseable packet
1038 * such as unknown IPv6 header), Drop it !!!
1039 */
1040
1041 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1042 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1043 status = VXGE_HW_OK;
1044 goto exit;
1045 }
1046
1047 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1048 status = VXGE_HW_ERR_INVALID_TCODE;
1049 goto exit;
1050 }
1051
1052 ring->stats->rxd_t_code_err_cnt[t_code]++;
1053exit:
1054 return status;
1055}
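
/*
 * Completion-loop sketch (illustrative only): draining completed receive
 * descriptors with the two helpers above.  Handing the buffer to the stack
 * and refilling the ring are left to the caller.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) != VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		(pass the completed buffer up, then refill and re-post)
 *	}
 */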
1056
1057/**
1058 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1059 *
1060 * @fifo: fifohandle
1061 * @txdl_ptr: The starting location of the TxDL in host memory
1062 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1063 * @no_snoop: No snoop flags
1064 *
1065 * This function posts a non-offload doorbell to doorbell FIFO
1066 *
1067 */
1068static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1069 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1070{
1071 struct __vxge_hw_channel *channel;
1072
1073 channel = &fifo->channel;
1074
1075 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1076 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1077 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1078 &fifo->nofl_db->control_0);
1079
1080 mmiowb();
1081
1082 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1083
1084 mmiowb();
1085}
1086
1087/**
1088 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1089 * the fifo
1090 * @fifoh: Handle to the fifo object used for non offload send
1091 */
1092u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1093{
1094 return vxge_hw_channel_dtr_count(&fifoh->channel);
1095}
1096
1097/**
1098 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1099 * @fifoh: Handle to the fifo object used for non offload send
1100 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1101 * with a valid handle.
1102 * @txdl_priv: Buffer to return the pointer to per txdl space
1103 *
1104 * Reserve a single TxDL (that is, fifo descriptor)
1105 * for the subsequent filling-in by driver)
1106 * and posting on the corresponding channel (@channelh)
1107 * via vxge_hw_fifo_txdl_post().
1108 *
1109 * Note: it is the responsibility of the driver to reserve multiple descriptors
1110 * for a lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1111 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1112 *
1113 * Returns: VXGE_HW_OK - success;
1114 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1115 *
1116 */
1117enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1118 struct __vxge_hw_fifo *fifo,
1119 void **txdlh, void **txdl_priv)
1120{
1121 struct __vxge_hw_channel *channel;
1122 enum vxge_hw_status status;
1123 int i;
1124
1125 channel = &fifo->channel;
1126
1127 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1128
1129 if (status == VXGE_HW_OK) {
1130 struct vxge_hw_fifo_txd *txdp =
1131 (struct vxge_hw_fifo_txd *)*txdlh;
1132 struct __vxge_hw_fifo_txdl_priv *priv;
1133
1134 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1135
1136 /* reset the TxDL's private */
1137 priv->align_dma_offset = 0;
1138 priv->align_vaddr_start = priv->align_vaddr;
1139 priv->align_used_frags = 0;
1140 priv->frags = 0;
1141 priv->alloc_frags = fifo->config->max_frags;
1142 priv->next_txdl_priv = NULL;
1143
1144 *txdl_priv = (void *)(size_t)txdp->host_control;
1145
1146 for (i = 0; i < fifo->config->max_frags; i++) {
1147 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1148 txdp->control_0 = txdp->control_1 = 0;
1149 }
1150 }
1151
1152 return status;
1153}
1154
1155/**
1156 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1157 * descriptor.
1158 * @fifo: Handle to the fifo object used for non offload send
1159 * @txdlh: Descriptor handle.
1160 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1161 * (of buffers).
1162 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1163 * @size: Size of the data buffer (in bytes).
1164 *
1165 * This API is part of the preparation of the transmit descriptor for posting
1166 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1167 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1168 * All three APIs fill in the fields of the fifo descriptor,
1169 * in accordance with the Titan specification.
1170 *
1171 */
1172void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1173 void *txdlh, u32 frag_idx,
1174 dma_addr_t dma_pointer, u32 size)
1175{
1176 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1177 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1178 struct __vxge_hw_channel *channel;
1179
1180 channel = &fifo->channel;
1181
1182 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1183 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1184
1185 if (frag_idx != 0)
1186 txdp->control_0 = txdp->control_1 = 0;
1187 else {
1188 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1189 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1190 txdp->control_1 |= fifo->interrupt_type;
1191 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1192 fifo->tx_intr_num);
1193 if (txdl_priv->frags) {
1194 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1195 (txdl_priv->frags - 1);
1196 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1197 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1198 }
1199 }
1200
1201 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1202
1203 txdp->buffer_pointer = (u64)dma_pointer;
1204 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1205 fifo->stats->total_buffers++;
1206 txdl_priv->frags++;
1207}
1208
1209/**
1210 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1211 * @fifo: Handle to the fifo object used for non offload send
1212 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1213 * @frags: Number of contiguous buffers that are part of a single
1214 * transmit operation.
1215 *
1216 * Post descriptor on the 'fifo' type channel for transmission.
1217 * Prior to posting the descriptor should be filled in accordance with
1218 * Host/Titan interface specification for a given service (LL, etc.).
1219 *
1220 */
1221void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1222{
1223 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1224 struct vxge_hw_fifo_txd *txdp_last;
1225 struct vxge_hw_fifo_txd *txdp_first;
1226 struct __vxge_hw_channel *channel;
1227
1228 channel = &fifo->channel;
1229
1230 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1231 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1232
1233 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1234 txdp_last->control_0 |=
1235 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1236 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1237
1238 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1239
1240 __vxge_hw_non_offload_db_post(fifo,
1241 (u64)txdl_priv->dma_addr,
1242 txdl_priv->frags - 1,
1243 fifo->no_snoop_bits);
1244
1245 fifo->stats->total_posts++;
1246 fifo->stats->common_stats.usage_cnt++;
1247 if (fifo->stats->common_stats.usage_max <
1248 fifo->stats->common_stats.usage_cnt)
1249 fifo->stats->common_stats.usage_max =
1250 fifo->stats->common_stats.usage_cnt;
1251}
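
/*
 * Transmit sketch (illustrative only): reserving a TxDL, attaching one
 * DMA-mapped fragment per buffer and posting it.  "nr_frags", "dma[]" and
 * "len[]" are hypothetical values supplied by the caller.
 *
 *	void *txdlh, *txdl_priv;
 *	int i;
 *
 *	if (vxge_hw_fifo_free_txdl_count_get(fifo) == 0)
 *		return;
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return;
 *	for (i = 0; i < nr_frags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i, dma[i], len[i]);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */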
1252
1253/**
1254 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1255 * @fifo: Handle to the fifo object used for non offload send
1256 * @txdlh: Descriptor handle. Returned by HW.
1257 * @t_code: Transfer code, as per Titan User Guide,
1258 * Transmit Descriptor Format.
1259 * Returned by HW.
1260 *
1261 * Retrieve the _next_ completed descriptor.
1262 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1263 * driver of new completed descriptors. After that
1264 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1265 * of the completions (the very first completion is passed by HW via
1266 * vxge_hw_channel_callback_f).
1267 *
1268 * Implementation-wise, the driver is free to call
1269 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1270 * channel callback, or in a deferred fashion and separate (from HW)
1271 * context.
1272 *
1273 * Non-zero @t_code means failure to process the descriptor.
1274 * The failure could happen, for instance, when the link is
1275 * down, in which case Titan completes the descriptor because it
1276 * is not able to send the data out.
1277 *
1278 * For details please refer to Titan User Guide.
1279 *
1280 * Returns: VXGE_HW_OK - success.
1281 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1282 * are currently available for processing.
1283 *
1284 */
1285enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1286 struct __vxge_hw_fifo *fifo, void **txdlh,
1287 enum vxge_hw_fifo_tcode *t_code)
1288{
1289 struct __vxge_hw_channel *channel;
1290 struct vxge_hw_fifo_txd *txdp;
1291 enum vxge_hw_status status = VXGE_HW_OK;
1292
1293 channel = &fifo->channel;
1294
1295 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1296
1297 txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1298 if (txdp == NULL) {
1299 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1300 goto exit;
1301 }
1302
1303 /* check whether host owns it */
1304 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1305
1306 vxge_assert(txdp->host_control != 0);
1307
1308 vxge_hw_channel_dtr_complete(channel);
1309
1310 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1311
1312 if (fifo->stats->common_stats.usage_cnt > 0)
1313 fifo->stats->common_stats.usage_cnt--;
1314
1315 status = VXGE_HW_OK;
1316 goto exit;
1317 }
1318
1319 /* no more completions */
1320 *txdlh = NULL;
1321 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1322exit:
1323 return status;
1324}
1325
1326/**
1327 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1328 * @fifo: Handle to the fifo object used for non offload send
1329 * @txdlh: Descriptor handle.
1330 * @t_code: One of the enumerated (and documented in the Titan user guide)
1331 * "transfer codes".
1332 *
1333 * Handle descriptor's transfer code. The latter comes with each completed
1334 * descriptor.
1335 *
1336 * Returns: one of the enum vxge_hw_status{} enumerated types.
1337 * VXGE_HW_OK - for success.
1338 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1339 */
1340enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1341 void *txdlh,
1342 enum vxge_hw_fifo_tcode t_code)
1343{
1344 struct __vxge_hw_channel *channel;
1345
1346 enum vxge_hw_status status = VXGE_HW_OK;
1347 channel = &fifo->channel;
1348
1349 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1350 status = VXGE_HW_ERR_INVALID_TCODE;
1351 goto exit;
1352 }
1353
1354 fifo->stats->txd_t_code_err_cnt[t_code]++;
1355exit:
1356 return status;
1357}
1358
1359/**
1360 * vxge_hw_fifo_txdl_free - Free descriptor.
1361 * @fifo: Handle to the fifo object used for non offload send
1362 * @txdlh: Descriptor handle.
1363 *
1364 * Free the reserved descriptor. This operation is "symmetrical" to
1365 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1366 * lifecycle.
1367 *
1368 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1369 * be:
1370 *
1371 * - reserved (vxge_hw_fifo_txdl_reserve);
1372 *
1373 * - posted (vxge_hw_fifo_txdl_post);
1374 *
1375 * - completed (vxge_hw_fifo_txdl_next_completed);
1376 *
1377 * - and recycled again (vxge_hw_fifo_txdl_free).
1378 *
1379 * For alternative state transitions and more details please refer to
1380 * the design doc.
1381 *
1382 */
1383void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1384{
1385 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1386 u32 max_frags;
1387 struct __vxge_hw_channel *channel;
1388
1389 channel = &fifo->channel;
1390
1391 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1392 (struct vxge_hw_fifo_txd *)txdlh);
1393
1394 max_frags = fifo->config->max_frags;
1395
1396 vxge_hw_channel_dtr_free(channel, txdlh);
1397}
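
/*
 * Reclaim sketch (illustrative only): walking completed TxDLs after a
 * transmit-complete interrupt.  VXGE_HW_FIFO_T_CODE_OK is assumed to be the
 * zero transfer code from vxge-traffic.h; DMA unmapping is omitted.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */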
1398
1399/**
1400 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1401 * to MAC address table.
1402 * @vp: Vpath handle.
1403 * @macaddr: MAC address to be added for this vpath into the list
1404 * @macaddr_mask: MAC address mask for macaddr
1405 * @duplicate_mode: Duplicate MAC address add mode. Please see
1406 * enum vxge_hw_vpath_mac_addr_add_mode{}
1407 *
1408 * Adds the given mac address and mac address mask into the list for this
1409 * vpath.
1410 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1411 * vxge_hw_vpath_mac_addr_get_next
1412 *
1413 */
1414enum vxge_hw_status
1415vxge_hw_vpath_mac_addr_add(
1416 struct __vxge_hw_vpath_handle *vp,
1417 u8 (macaddr)[ETH_ALEN],
1418 u8 (macaddr_mask)[ETH_ALEN],
1419 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1420{
1421 u32 i;
1422 u64 data1 = 0ULL;
1423 u64 data2 = 0ULL;
1424 enum vxge_hw_status status = VXGE_HW_OK;
1425
1426 if (vp == NULL) {
1427 status = VXGE_HW_ERR_INVALID_HANDLE;
1428 goto exit;
1429 }
1430
1431 for (i = 0; i < ETH_ALEN; i++) {
1432 data1 <<= 8;
1433 data1 |= (u8)macaddr[i];
1434
1435 data2 <<= 8;
1436 data2 |= (u8)macaddr_mask[i];
1437 }
1438
1439 switch (duplicate_mode) {
1440 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1441 i = 0;
1442 break;
1443 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1444 i = 1;
1445 break;
1446 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1447 i = 2;
1448 break;
1449 default:
1450 i = 0;
1451 break;
1452 }
1453
1454 status = __vxge_hw_vpath_rts_table_set(vp,
1455 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1456 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1457 0,
1458 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1459 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1460 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1461exit:
1462 return status;
1463}
1464
1465/**
1466 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1467 * from MAC address table.
1468 * @vp: Vpath handle.
1469 * @macaddr: First MAC address entry for this vpath in the list
1470 * @macaddr_mask: MAC address mask for macaddr
1471 *
1472 * Returns the first mac address and mac address mask in the list for this
1473 * vpath.
1474 * see also: vxge_hw_vpath_mac_addr_get_next
1475 *
1476 */
1477enum vxge_hw_status
1478vxge_hw_vpath_mac_addr_get(
1479 struct __vxge_hw_vpath_handle *vp,
1480 u8 (macaddr)[ETH_ALEN],
1481 u8 (macaddr_mask)[ETH_ALEN])
1482{
1483 u32 i;
1484 u64 data1 = 0ULL;
1485 u64 data2 = 0ULL;
1486 enum vxge_hw_status status = VXGE_HW_OK;
1487
1488 if (vp == NULL) {
1489 status = VXGE_HW_ERR_INVALID_HANDLE;
1490 goto exit;
1491 }
1492
1493 status = __vxge_hw_vpath_rts_table_get(vp,
1494 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1495 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1496 0, &data1, &data2);
1497
1498 if (status != VXGE_HW_OK)
1499 goto exit;
1500
1501 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1502
1503 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1504
1505 for (i = ETH_ALEN; i > 0; i--) {
1506 macaddr[i-1] = (u8)(data1 & 0xFF);
1507 data1 >>= 8;
1508
1509 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1510 data2 >>= 8;
1511 }
1512exit:
1513 return status;
1514}
1515
1516/**
1517 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1518 * vpath
1519 * from MAC address table.
1520 * @vp: Vpath handle.
1521 * @macaddr: Next MAC address entry for this vpath in the list
1522 * @macaddr_mask: MAC address mask for macaddr
1523 *
1524 * Returns the next mac address and mac address mask in the list for this
1525 * vpath.
1526 * see also: vxge_hw_vpath_mac_addr_get
1527 *
1528 */
1529enum vxge_hw_status
1530vxge_hw_vpath_mac_addr_get_next(
1531 struct __vxge_hw_vpath_handle *vp,
1532 u8 (macaddr)[ETH_ALEN],
1533 u8 (macaddr_mask)[ETH_ALEN])
1534{
1535 u32 i;
1536 u64 data1 = 0ULL;
1537 u64 data2 = 0ULL;
1538 enum vxge_hw_status status = VXGE_HW_OK;
1539
1540 if (vp == NULL) {
1541 status = VXGE_HW_ERR_INVALID_HANDLE;
1542 goto exit;
1543 }
1544
1545 status = __vxge_hw_vpath_rts_table_get(vp,
1546 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1547 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1548 0, &data1, &data2);
1549
1550 if (status != VXGE_HW_OK)
1551 goto exit;
1552
1553 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1554
1555 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1556
1557 for (i = ETH_ALEN; i > 0; i--) {
1558 macaddr[i-1] = (u8)(data1 & 0xFF);
1559 data1 >>= 8;
1560
1561 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1562 data2 >>= 8;
1563 }
1564
1565exit:
1566 return status;
1567}
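
/*
 * Table-walk sketch (illustrative only): listing every MAC/mask pair
 * configured on a vpath with the get/get_next pair above.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		(use addr/mask here)
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
 *	}
 */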
1568
1569/**
1570 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1571 * from the MAC address table.
1572 * @vp: Vpath handle.
1573 * @macaddr: MAC address to be deleted for this vpath from the list
1574 * @macaddr_mask: MAC address mask for macaddr
1575 *
1576 * Deletes the given mac address and mac address mask from the list for this
1577 * vpath.
1578 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1579 * vxge_hw_vpath_mac_addr_get_next
1580 *
1581 */
1582enum vxge_hw_status
1583vxge_hw_vpath_mac_addr_delete(
1584 struct __vxge_hw_vpath_handle *vp,
1585 u8 (macaddr)[ETH_ALEN],
1586 u8 (macaddr_mask)[ETH_ALEN])
1587{
1588 u32 i;
1589 u64 data1 = 0ULL;
1590 u64 data2 = 0ULL;
1591 enum vxge_hw_status status = VXGE_HW_OK;
1592
1593 if (vp == NULL) {
1594 status = VXGE_HW_ERR_INVALID_HANDLE;
1595 goto exit;
1596 }
1597
1598 for (i = 0; i < ETH_ALEN; i++) {
1599 data1 <<= 8;
1600 data1 |= (u8)macaddr[i];
1601
1602 data2 <<= 8;
1603 data2 |= (u8)macaddr_mask[i];
1604 }
1605
1606 status = __vxge_hw_vpath_rts_table_set(vp,
1607 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1608 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1609 0,
1610 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1611 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1612exit:
1613 return status;
1614}
1615
1616/**
1617 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1618 * to vlan id table.
1619 * @vp: Vpath handle.
1620 * @vid: vlan id to be added for this vpath into the list
1621 *
1622 * Adds the given vlan id into the list for this vpath.
1623 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1624 * vxge_hw_vpath_vid_get_next
1625 *
1626 */
1627enum vxge_hw_status
1628vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1629{
1630 enum vxge_hw_status status = VXGE_HW_OK;
1631
1632 if (vp == NULL) {
1633 status = VXGE_HW_ERR_INVALID_HANDLE;
1634 goto exit;
1635 }
1636
1637 status = __vxge_hw_vpath_rts_table_set(vp,
1638 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1639 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1640 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1641exit:
1642 return status;
1643}
1644
1645/**
1646 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1647 * from vlan id table.
1648 * @vp: Vpath handle.
1649 * @vid: Buffer to return vlan id
1650 *
1651 * Returns the first vlan id in the list for this vpath.
1652 * see also: vxge_hw_vpath_vid_get_next
1653 *
1654 */
1655enum vxge_hw_status
1656vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1657{
1658 u64 data;
1659 enum vxge_hw_status status = VXGE_HW_OK;
1660
1661 if (vp == NULL) {
1662 status = VXGE_HW_ERR_INVALID_HANDLE;
1663 goto exit;
1664 }
1665
1666 status = __vxge_hw_vpath_rts_table_get(vp,
1667 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1668 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1669 0, vid, &data);
1670
1671 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1672exit:
1673 return status;
1674}
1675
1676/**
1677 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1678 * from vlan id table.
1679 * @vp: Vpath handle.
1680 * @vid: Buffer to return vlan id
1681 *
1682 * Returns the next vlan id in the list for this vpath.
1683 * see also: vxge_hw_vpath_vid_get
1684 *
1685 */
1686enum vxge_hw_status
1687vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1688{
1689 u64 data;
1690 enum vxge_hw_status status = VXGE_HW_OK;
1691
1692 if (vp == NULL) {
1693 status = VXGE_HW_ERR_INVALID_HANDLE;
1694 goto exit;
1695 }
1696
1697 status = __vxge_hw_vpath_rts_table_get(vp,
1698 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1699 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1700 0, vid, &data);
1701
1702 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1703exit:
1704 return status;
1705}
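
/*
 * Table-walk sketch (illustrative only): iterating the vlan id table of a
 * vpath with the get/get_next pair above.
 *
 *	u64 vid;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_vid_get(vp, &vid);
 *	while (status == VXGE_HW_OK) {
 *		(use vid here)
 *		status = vxge_hw_vpath_vid_get_next(vp, &vid);
 *	}
 */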
1706
1707/**
1708 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1709 * from the vlan id table.
1710 * @vp: Vpath handle.
1711 * @vid: vlan id to be deleted for this vpath from the list
1712 *
1713 * Deletes the given vlan id from the list for this vpath.
1714 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1715 * vxge_hw_vpath_vid_get_next
1716 *
1717 */
1718enum vxge_hw_status
1719vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1720{
1721 enum vxge_hw_status status = VXGE_HW_OK;
1722
1723 if (vp == NULL) {
1724 status = VXGE_HW_ERR_INVALID_HANDLE;
1725 goto exit;
1726 }
1727
1728 status = __vxge_hw_vpath_rts_table_set(vp,
1729 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1730 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1731 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1732exit:
1733 return status;
1734}
1735
1736/**
1737 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1738 * @vp: Vpath handle.
1739 *
1740 * Enable promiscuous mode of Titan-e operation.
1741 *
1742 * See also: vxge_hw_vpath_promisc_disable().
1743 */
1744enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1745 struct __vxge_hw_vpath_handle *vp)
1746{
1747 u64 val64;
1748 struct __vxge_hw_virtualpath *vpath;
1749 enum vxge_hw_status status = VXGE_HW_OK;
1750
1751 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1752 status = VXGE_HW_ERR_INVALID_HANDLE;
1753 goto exit;
1754 }
1755
1756 vpath = vp->vpath;
1757
1758 /* Enable promiscuous mode for function 0 only */
1759 if (!(vpath->hldev->access_rights &
1760 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1761 return VXGE_HW_OK;
1762
1763 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1764
1765 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1766
1767 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1768 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1769 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1770 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1771
1772 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1773 }
1774exit:
1775 return status;
1776}
1777
1778/**
1779 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1780 * @vp: Vpath handle.
1781 *
1782 * Disable promiscuous mode of Titan-e operation.
1783 *
1784 * See also: vxge_hw_vpath_promisc_enable().
1785 */
1786enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1787 struct __vxge_hw_vpath_handle *vp)
1788{
1789 u64 val64;
1790 struct __vxge_hw_virtualpath *vpath;
1791 enum vxge_hw_status status = VXGE_HW_OK;
1792
1793 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1794 status = VXGE_HW_ERR_INVALID_HANDLE;
1795 goto exit;
1796 }
1797
1798 vpath = vp->vpath;
1799
1800 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1801
1802 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1803
1804 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1805 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1806 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1807
1808 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1809 }
1810exit:
1811 return status;
1812}
1813
1814/*
1815 * vxge_hw_vpath_bcast_enable - Enable broadcast
1816 * @vp: Vpath handle.
1817 *
1818 * Enable receiving broadcasts.
1819 */
1820enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1821 struct __vxge_hw_vpath_handle *vp)
1822{
1823 u64 val64;
1824 struct __vxge_hw_virtualpath *vpath;
1825 enum vxge_hw_status status = VXGE_HW_OK;
1826
1827 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1828 status = VXGE_HW_ERR_INVALID_HANDLE;
1829 goto exit;
1830 }
1831
1832 vpath = vp->vpath;
1833
1834 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1835
1836 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1837 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1838 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1839 }
1840exit:
1841 return status;
1842}
1843
1844/**
1845 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1846 * @vp: Vpath handle.
1847 *
1848 * Enable Titan-e multicast addresses.
1849 * Returns: VXGE_HW_OK on success.
1850 *
1851 */
1852enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1853 struct __vxge_hw_vpath_handle *vp)
1854{
1855 u64 val64;
1856 struct __vxge_hw_virtualpath *vpath;
1857 enum vxge_hw_status status = VXGE_HW_OK;
1858
1859 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1860 status = VXGE_HW_ERR_INVALID_HANDLE;
1861 goto exit;
1862 }
1863
1864 vpath = vp->vpath;
1865
1866 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1867
1868 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1869 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1870 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1871 }
1872exit:
1873 return status;
1874}
1875
1876/**
1877 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1878 * @vp: Vpath handle.
1879 *
1880 * Disable Titan-e multicast addresses.
1881 * Returns: VXGE_HW_OK - success.
1882 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1883 *
1884 */
1885enum vxge_hw_status
1886vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1887{
1888 u64 val64;
1889 struct __vxge_hw_virtualpath *vpath;
1890 enum vxge_hw_status status = VXGE_HW_OK;
1891
1892 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1893 status = VXGE_HW_ERR_INVALID_HANDLE;
1894 goto exit;
1895 }
1896
1897 vpath = vp->vpath;
1898
1899 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1900
1901 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1902 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1903 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1904 }
1905exit:
1906 return status;
1907}
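
/*
 * Usage sketch (illustrative only, not part of the driver): a
 * set-multicast style helper built on the broadcast/multicast calls
 * above. The helper name and the "all_multi" flag are assumptions made
 * for this example.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_config_multicast(struct __vxge_hw_vpath_handle *vp,
			      int all_multi)
{
	enum vxge_hw_status status;

	/* Broadcast reception is normally always enabled */
	status = vxge_hw_vpath_bcast_enable(vp);
	if (status != VXGE_HW_OK)
		return status;

	/* Pass all multicast frames or fall back to filtered reception */
	return all_multi ? vxge_hw_vpath_mcast_enable(vp) :
			   vxge_hw_vpath_mcast_disable(vp);
}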
1908
1909/*
1910 * __vxge_hw_vpath_alarm_process - Process Alarms.
1911 * @vpath: Virtual Path.
1912 * @skip_alarms: Do not clear the alarms
1913 *
1914 * Process vpath alarms.
1915 *
1916 */
1917enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1918 struct __vxge_hw_virtualpath *vpath,
1919 u32 skip_alarms)
1920{
1921 u64 val64;
1922 u64 alarm_status;
1923 u64 pic_status;
1924 struct __vxge_hw_device *hldev = NULL;
1925 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1926 u64 mask64;
1927 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1928 struct vxge_hw_vpath_reg __iomem *vp_reg;
1929
1930 if (vpath == NULL) {
1931		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1932			alarm_event);
1933		goto out2;
1934	}
1935
1936 hldev = vpath->hldev;
1937 vp_reg = vpath->vp_reg;
1938 alarm_status = readq(&vp_reg->vpath_general_int_status);
1939
1940 if (alarm_status == VXGE_HW_ALL_FOXES) {
1941		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1942			alarm_event);
1943 goto out;
1944 }
1945
1946 sw_stats = vpath->sw_stats;
1947
1948 if (alarm_status & ~(
1949 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1950 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1951 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1952 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1953 sw_stats->error_stats.unknown_alarms++;
1954
1955		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1956			alarm_event);
1957 goto out;
1958 }
1959
1960 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1961
1962 val64 = readq(&vp_reg->xgmac_vp_int_status);
1963
1964 if (val64 &
1965 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1966
1967 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1968
1969 if (((val64 &
1970			VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1971				(!(val64 &
1972				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1973			    ((val64 &
1974				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
1975				(!(val64 &
1976				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1977				))) {
1978				sw_stats->error_stats.network_sustained_fault++;
1979
1980 writeq(
1981 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1982 &vp_reg->asic_ntwk_vp_err_mask);
1983
1984 __vxge_hw_device_handle_link_down_ind(hldev);
1985				alarm_event = VXGE_HW_SET_LEVEL(
1986					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1987			}
1988
1989 if (((val64 &
1990			VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1991				(!(val64 &
1992				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1993			    ((val64 &
1994				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
1995				(!(val64 &
1996				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1997				))) {
1998
1999 sw_stats->error_stats.network_sustained_ok++;
2000
2001 writeq(
2002 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
2003 &vp_reg->asic_ntwk_vp_err_mask);
2004
2005 __vxge_hw_device_handle_link_up_ind(hldev);
2006				alarm_event = VXGE_HW_SET_LEVEL(
2007					VXGE_HW_EVENT_LINK_UP, alarm_event);
2008			}
2009
2010 writeq(VXGE_HW_INTR_MASK_ALL,
2011 &vp_reg->asic_ntwk_vp_err_reg);
2012
2013			alarm_event = VXGE_HW_SET_LEVEL(
2014				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2015
2016 if (skip_alarms)
2017 return VXGE_HW_OK;
2018 }
2019 }
2020
2021 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2022
2023 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2024
2025 if (pic_status &
2026 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2027
2028 val64 = readq(&vp_reg->general_errors_reg);
2029 mask64 = readq(&vp_reg->general_errors_mask);
2030
2031 if ((val64 &
2032 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2033 ~mask64) {
2034 sw_stats->error_stats.ini_serr_det++;
2035
2036				alarm_event = VXGE_HW_SET_LEVEL(
2037					VXGE_HW_EVENT_SERR, alarm_event);
2038			}
2039
2040 if ((val64 &
2041 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2042 ~mask64) {
2043 sw_stats->error_stats.dblgen_fifo0_overflow++;
2044
2045				alarm_event = VXGE_HW_SET_LEVEL(
2046					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2047			}
2048
2049 if ((val64 &
2050 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2051 ~mask64)
2052 sw_stats->error_stats.statsb_pif_chain_error++;
2053
2054 if ((val64 &
2055 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2056 ~mask64)
2057 sw_stats->error_stats.statsb_drop_timeout++;
2058
2059 if ((val64 &
2060 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2061 ~mask64)
2062 sw_stats->error_stats.target_illegal_access++;
2063
2064 if (!skip_alarms) {
2065 writeq(VXGE_HW_INTR_MASK_ALL,
2066 &vp_reg->general_errors_reg);
2067				alarm_event = VXGE_HW_SET_LEVEL(
2068					VXGE_HW_EVENT_ALARM_CLEARED,
2069					alarm_event);
2070 }
2071 }
2072
2073 if (pic_status &
2074 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2075
2076 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2077 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2078
2079 if ((val64 &
2080 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2081 ~mask64) {
2082 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2083
2084					alarm_event = VXGE_HW_SET_LEVEL(
2085						VXGE_HW_EVENT_FIFO_ERR,
2086						alarm_event);
2087 }
2088
2089 if ((val64 &
2090 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2091 ~mask64) {
2092 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2093
2094					alarm_event = VXGE_HW_SET_LEVEL(
2095						VXGE_HW_EVENT_FIFO_ERR,
2096						alarm_event);
2097 }
2098
2099 if ((val64 &
2100 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2101 ~mask64) {
2102 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2103
2104					alarm_event = VXGE_HW_SET_LEVEL(
2105						VXGE_HW_EVENT_FIFO_ERR,
2106						alarm_event);
2107 }
2108
2109 if (!skip_alarms) {
2110 writeq(VXGE_HW_INTR_MASK_ALL,
2111 &vp_reg->kdfcctl_errors_reg);
2112				alarm_event = VXGE_HW_SET_LEVEL(
2113					VXGE_HW_EVENT_ALARM_CLEARED,
2114					alarm_event);
2115 }
2116 }
2117
2118 }
2119
2120 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2121
2122 val64 = readq(&vp_reg->wrdma_alarm_status);
2123
2124 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2125
2126 val64 = readq(&vp_reg->prc_alarm_reg);
2127 mask64 = readq(&vp_reg->prc_alarm_mask);
2128
2129			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
2130 ~mask64)
2131 sw_stats->error_stats.prc_ring_bumps++;
2132
2133 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2134 ~mask64) {
2135 sw_stats->error_stats.prc_rxdcm_sc_err++;
2136
2137				alarm_event = VXGE_HW_SET_LEVEL(
2138					VXGE_HW_EVENT_VPATH_ERR,
2139					alarm_event);
2140 }
2141
2142 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2143 & ~mask64) {
2144 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2145
2146				alarm_event = VXGE_HW_SET_LEVEL(
2147					VXGE_HW_EVENT_VPATH_ERR,
2148					alarm_event);
2149 }
2150
2151 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2152 & ~mask64) {
2153 sw_stats->error_stats.prc_quanta_size_err++;
2154
2155				alarm_event = VXGE_HW_SET_LEVEL(
2156					VXGE_HW_EVENT_VPATH_ERR,
2157					alarm_event);
2158 }
2159
2160 if (!skip_alarms) {
2161 writeq(VXGE_HW_INTR_MASK_ALL,
2162 &vp_reg->prc_alarm_reg);
2163				alarm_event = VXGE_HW_SET_LEVEL(
2164					VXGE_HW_EVENT_ALARM_CLEARED,
2165					alarm_event);
2166 }
2167 }
2168 }
2169out:
2170 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2171out2:
2172	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2173 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2174 return VXGE_HW_OK;
2175
2176 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2177
2178 if (alarm_event == VXGE_HW_EVENT_SERR)
2179 return VXGE_HW_ERR_CRITICAL;
2180
2181 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2182 VXGE_HW_ERR_SLOT_FREEZE :
2183 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2184 VXGE_HW_ERR_VPATH;
2185}
2186
2187/*
2188 * vxge_hw_vpath_alarm_process - Process Alarms.
2189 * @vp: Virtual Path handle.
2190 * @skip_alarms: Do not clear the alarms
2191 *
2192 * Process vpath alarms.
2193 *
2194 */
2195enum vxge_hw_status vxge_hw_vpath_alarm_process(
2196 struct __vxge_hw_vpath_handle *vp,
2197 u32 skip_alarms)
2198{
2199 enum vxge_hw_status status = VXGE_HW_OK;
2200
2201 if (vp == NULL) {
2202 status = VXGE_HW_ERR_INVALID_HANDLE;
2203 goto exit;
2204 }
2205
2206 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2207exit:
2208 return status;
2209}
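
/*
 * Usage sketch (illustrative only, not part of the driver): how an alarm
 * interrupt handler might invoke vxge_hw_vpath_alarm_process() and react
 * to a critical error. The helper name and the error message are
 * assumptions made for this example.
 */
static void __maybe_unused
vxge_example_handle_alarm(struct __vxge_hw_vpath_handle *vp)
{
	/* skip_alarms == 0: clear the alarm registers while processing */
	enum vxge_hw_status status = vxge_hw_vpath_alarm_process(vp, 0);

	if (status == VXGE_HW_ERR_CRITICAL)
		printk(KERN_ERR "vxge: critical alarm, device reset needed\n");
}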
2210
2211/**
2212 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2213 *	alarms
2214 * @vp: Virtual Path handle.
2215 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2216 *		interrupts (can be repeated). If the fifo or ring is not
2217 *		enabled, the corresponding MSIX vector should be set to 0.
2218 * @alarm_msix_id: MSIX vector for alarm.
2219 *
2220 * This API associates the given MSIX vector numbers with the four TIM
2221 * interrupts and the alarm interrupt.
2222 */
2223enum vxge_hw_status
2224vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2225 int alarm_msix_id)
2226{
2227 u64 val64;
2228 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2229 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2230 u32 first_vp_id = vpath->hldev->first_vp_id;
2231
2232 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2233 (first_vp_id * 4) + tim_msix_id[0]) |
2234 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2235 (first_vp_id * 4) + tim_msix_id[1]) |
2236 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2237 (first_vp_id * 4) + tim_msix_id[2]);
2238
2239 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2240 (first_vp_id * 4) + tim_msix_id[3]);
2241
2242 writeq(val64, &vp_reg->interrupt_cfg0);
2243
2244 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2245 (first_vp_id * 4) + alarm_msix_id),
2246 &vp_reg->interrupt_cfg2);
2247
2248	if (vpath->hldev->config.intr_mode ==
2249			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2250		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2251				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2252				0, 32), &vp_reg->one_shot_vect1_en);
2253
2254		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2255				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2256				0, 32), &vp_reg->one_shot_vect2_en);
2257
2258		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2259				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2260				0, 32), &vp_reg->one_shot_vect3_en);
2261	}
2265
2266 return VXGE_HW_OK;
2267}
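
/*
 * Usage sketch (illustrative only, not part of the driver): programming
 * the per-vpath MSIX mapping. Using vector 0 for Tx, 1 for Rx and 2 for
 * alarms is an assumption made for this example; a real driver derives
 * the layout from its own MSIX enumeration.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_map_msix(struct __vxge_hw_vpath_handle *vp)
{
	/* One slot per TIM interrupt; unused TIM interrupts stay 0 */
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};

	return vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
}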
2268
2269/**
2270 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2271 * @vp: Virtual Path handle.
2272 * @msix_id: MSIX ID
2273 *
2274 * The function masks the msix interrupt for the given msix_id
2275 *
2276 * Returns: None.
2277 *
2278 * See also: vxge_hw_vpath_msix_unmask(),
2279 * vxge_hw_vpath_msix_clear()
2280 */
2281void
2282vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2283{
2284 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2285 __vxge_hw_pio_mem_write32_upper(
2286 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2287 (msix_id / 4)), 0, 32),
2288 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2289
2290 return;
2291}
2292
2293/**
2294 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2295 * @vp: Virtual Path handle.
2296 * @msix_id: MSIX ID
2297 *
2298 * The function clears the msix interrupt for the given msix_id
2299 *
2300 * Returns: None.
2301 *
2302 * See also: vxge_hw_vpath_msix_mask(),
2303 * vxge_hw_vpath_msix_unmask()
2304 */
2305void
2306vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2307{
2308 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2309 if (hldev->config.intr_mode ==
2310 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2311 __vxge_hw_pio_mem_write32_upper(
2312 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2313 (msix_id/4)), 0, 32),
2314 &hldev->common_reg->
2315 clr_msix_one_shot_vec[msix_id%4]);
2316 } else {
2317 __vxge_hw_pio_mem_write32_upper(
2318 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2319 (msix_id/4)), 0, 32),
2320 &hldev->common_reg->
2321 clear_msix_mask_vect[msix_id%4]);
2322 }
2323
2324 return;
2325}
2326
2327/**
2328 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2329 * @vp: Virtual Path handle.
2330 * @msix_id: MSIX ID
2331 *
2332 * The function unmasks the msix interrupt for the given msix_id
2333 *
2334 * Returns: None.
2335 *
2336 * See also: vxge_hw_vpath_msix_mask(),
2337 * vxge_hw_vpath_msix_clear()
2338 */
2339void
2340vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2341{
2342 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2343 __vxge_hw_pio_mem_write32_upper(
2344 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2345 (msix_id/4)), 0, 32),
2346 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2347
2348 return;
2349}
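
/*
 * Usage sketch (illustrative only, not part of the driver): the
 * mask -> handle -> clear pattern a per-vector interrupt handler might
 * follow. The helper name is an assumption made for this example; note
 * that in non one-shot mode vxge_hw_vpath_msix_clear() writes the same
 * clear_msix_mask_vect register that vxge_hw_vpath_msix_unmask() uses,
 * so the clear also re-arms the vector.
 */
static void __maybe_unused
vxge_example_msix_isr(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	/* Quiesce the vector while the completion work is outstanding */
	vxge_hw_vpath_msix_mask(vp, msix_id);

	/* ... process completions or schedule deferred work here ... */

	/* Re-arm the vector once the work has been handled */
	vxge_hw_vpath_msix_clear(vp, msix_id);
}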
2350
2351/**
2352 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2353 * @vp: Virtual Path handle.
2354 *
2355 * The function masks all msix interrupts for the given vpath.
2356 *
2357 */
2358void
2359vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2360{
2361
2362 __vxge_hw_pio_mem_write32_upper(
2363 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2364 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2365
2366 return;
2367}
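
/*
 * Usage sketch (illustrative only, not part of the driver): silencing a
 * vpath, e.g. ahead of a reset, by masking every vector it owns. The
 * helper name is an assumption made for this example.
 */
static void __maybe_unused
vxge_example_quiesce_vpath(struct __vxge_hw_vpath_handle *vp)
{
	/* Blocks all MSIX sources for this vpath with a single write */
	vxge_hw_vpath_msix_mask_all(vp);
}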
2368
2369/**
2370 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2371 * @vp: Virtual Path handle.
2372 *
2373 * Mask Tx and Rx vpath interrupts.
2374 *
2375 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2376 */
2377void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2378{
2379 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2380 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2381 u64 val64;
2382 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2383
2384 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2385 tim_int_mask1, vp->vpath->vp_id);
2386
2387 val64 = readq(&hldev->common_reg->tim_int_mask0);
2388
2389 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2390 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2391 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2392 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2393 &hldev->common_reg->tim_int_mask0);
2394 }
2395
2396 val64 = readl(&hldev->common_reg->tim_int_mask1);
2397
2398 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2399 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2400 __vxge_hw_pio_mem_write32_upper(
2401 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2402 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2403 &hldev->common_reg->tim_int_mask1);
2404 }
2405
2406 return;
2407}
2408
2409/**
2410 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2411 * @vp: Virtual Path handle.
2412 *
2413 * Unmask Tx and Rx vpath interrupts.
2414 *
2415 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2416 */
2417void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2418{
2419 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2420 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2421 u64 val64;
2422 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2423
2424 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2425 tim_int_mask1, vp->vpath->vp_id);
2426
2427 val64 = readq(&hldev->common_reg->tim_int_mask0);
2428
2429 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2430 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2431 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2432 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2433 &hldev->common_reg->tim_int_mask0);
2434 }
2435
2436 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2437 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2438 __vxge_hw_pio_mem_write32_upper(
2439 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2440 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2441 &hldev->common_reg->tim_int_mask1);
2442 }
2443
2444 return;
2445}
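
/*
 * Usage sketch (illustrative only, not part of the driver): an INTA mode
 * poll pass bracketed by the mask/unmask pair above. The helper name is
 * an assumption made for this example.
 */
static void __maybe_unused
vxge_example_inta_poll(struct __vxge_hw_vpath_handle *vp,
		       struct __vxge_hw_ring *ring)
{
	/* Keep Tx/Rx interrupts quiet while draining completions */
	vxge_hw_vpath_inta_mask_tx_rx(vp);

	vxge_hw_vpath_poll_rx(ring);

	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}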
2446
2447/**
2448 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2449 * descriptors and process the same.
2450 * @ring: Handle to the ring object used for receive
2451 *
2452 * The function polls the Rx for the completed descriptors and calls
2453 * the driver via the supplied completion callback.
2454 *
2455 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2456 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2457 * descriptors available which are yet to be processed.
2458 *
2459 * See also: vxge_hw_vpath_poll_tx()
2460 */
2461enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2462{
2463 u8 t_code;
2464 enum vxge_hw_status status = VXGE_HW_OK;
2465 void *first_rxdh;
2466 u64 val64 = 0;
2467 int new_count = 0;
2468
2469 ring->cmpl_cnt = 0;
2470
2471 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2472 if (status == VXGE_HW_OK)
2473 ring->callback(ring, first_rxdh,
2474 t_code, ring->channel.userdata);
2475
2476 if (ring->cmpl_cnt != 0) {
2477 ring->doorbell_cnt += ring->cmpl_cnt;
2478 if (ring->doorbell_cnt >= ring->rxds_limit) {
2479 /*
2480 * Each RxD is of 4 qwords, update the number of
2481 * qwords replenished
2482 */
2483 new_count = (ring->doorbell_cnt * 4);
2484
2485 /* For each block add 4 more qwords */
2486 ring->total_db_cnt += ring->doorbell_cnt;
2487 if (ring->total_db_cnt >= ring->rxds_per_block) {
2488 new_count += 4;
2489 /* Reset total count */
2490 ring->total_db_cnt %= ring->rxds_per_block;
2491 }
2492 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2493 &ring->vp_reg->prc_rxd_doorbell);
2494 val64 =
2495 readl(&ring->common_reg->titan_general_int_status);
2496 ring->doorbell_cnt = 0;
2497 }
2498 }
2499
2500 return status;
2501}
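
/*
 * Usage sketch (illustrative only, not part of the driver): a NAPI style
 * receive poll built on vxge_hw_vpath_poll_rx(). The helper name and the
 * simplified budget accounting are assumptions made for this example; the
 * real per-descriptor work happens in the ring completion callback.
 */
static int __maybe_unused
vxge_example_napi_poll(struct __vxge_hw_ring *ring, int budget)
{
	enum vxge_hw_status status;

	/* Drains one batch of completed RxDs through the ring callback */
	status = vxge_hw_vpath_poll_rx(ring);

	/* Report full budget while completions remain, else signal done */
	return (status == VXGE_HW_COMPLETIONS_REMAIN) ? budget : 0;
}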
2502
2503/**
2504 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2505 * the same.
2506 * @fifo: Handle to the fifo object used for non-offload send
2507 *
2508 * The function polls the Tx for the completed descriptors and calls
2509 * the driver via the supplied completion callback.
2510 *
2511 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2512 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2513 * descriptors available which are yet to be processed.
2514 *
2515 * See also: vxge_hw_vpath_poll_rx().
2516 */
2517enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2518	struct sk_buff ***skb_ptr, int nr_skb,
2519 int *more)
2520{
2521 enum vxge_hw_fifo_tcode t_code;
2522 void *first_txdlh;
2523 enum vxge_hw_status status = VXGE_HW_OK;
2524 struct __vxge_hw_channel *channel;
2525
2526 channel = &fifo->channel;
2527
2528 status = vxge_hw_fifo_txdl_next_completed(fifo,
2529 &first_txdlh, &t_code);
2530 if (status == VXGE_HW_OK)
2531		if (fifo->callback(fifo, first_txdlh, t_code,
2532 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2533			status = VXGE_HW_COMPLETIONS_REMAIN;
2534
2535 return status;
2536}
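
/*
 * Usage sketch (illustrative only, not part of the driver): draining Tx
 * completions with vxge_hw_vpath_poll_tx(). The helper name, the array
 * size and the loop structure are assumptions made for this example; the
 * fifo completion callback fills the skb array and sets "more" when
 * further completions are pending.
 */
static void __maybe_unused
vxge_example_drain_tx(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];
	struct sk_buff **skb_ptr;
	int more;

	do {
		skb_ptr = completed;
		more = 0;
		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed),
				      &more);
		/* A real driver would dev_kfree_skb() the collected skbs */
	} while (more);
}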