 1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
 18#include <linux/slab.h>
 19
20#include "vxge-traffic.h"
21#include "vxge-config.h"
22
23/*
24 * __vxge_hw_channel_allocate - Allocate memory for channel
25 * This function allocates required memory for the channel and various arrays
26 * in the channel
27 */
28struct __vxge_hw_channel*
29__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
30 enum __vxge_hw_channel_type type,
31 u32 length, u32 per_dtr_space, void *userdata)
32{
33 struct __vxge_hw_channel *channel;
34 struct __vxge_hw_device *hldev;
35 int size = 0;
36 u32 vp_id;
37
38 hldev = vph->vpath->hldev;
39 vp_id = vph->vpath->vp_id;
40
41 switch (type) {
42 case VXGE_HW_CHANNEL_TYPE_FIFO:
43 size = sizeof(struct __vxge_hw_fifo);
44 break;
45 case VXGE_HW_CHANNEL_TYPE_RING:
46 size = sizeof(struct __vxge_hw_ring);
47 break;
48 default:
49 break;
50 }
51
52 channel = kzalloc(size, GFP_KERNEL);
53 if (channel == NULL)
54 goto exit0;
55 INIT_LIST_HEAD(&channel->item);
56
57 channel->common_reg = hldev->common_reg;
58 channel->first_vp_id = hldev->first_vp_id;
59 channel->type = type;
60 channel->devh = hldev;
61 channel->vph = vph;
62 channel->userdata = userdata;
63 channel->per_dtr_space = per_dtr_space;
64 channel->length = length;
65 channel->vp_id = vp_id;
66
67 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
68 if (channel->work_arr == NULL)
69 goto exit1;
70
71 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
72 if (channel->free_arr == NULL)
73 goto exit1;
74 channel->free_ptr = length;
75
76 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
77 if (channel->reserve_arr == NULL)
78 goto exit1;
79 channel->reserve_ptr = length;
80 channel->reserve_top = 0;
81
82 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
83 if (channel->orig_arr == NULL)
84 goto exit1;
85
86 return channel;
87exit1:
88 __vxge_hw_channel_free(channel);
89
90exit0:
91 return NULL;
92}
93
94/*
95 * __vxge_hw_channel_free - Free memory allocated for channel
96 * This function deallocates memory from the channel and various arrays
97 * in the channel
98 */
99void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
100{
101 kfree(channel->work_arr);
102 kfree(channel->free_arr);
103 kfree(channel->reserve_arr);
104 kfree(channel->orig_arr);
105 kfree(channel);
106}
107
108/*
109 * __vxge_hw_channel_initialize - Initialize a channel
110 * This function initializes a channel by properly setting the
111 * various references
112 */
113enum vxge_hw_status
114__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
115{
116 u32 i;
117 struct __vxge_hw_virtualpath *vpath;
118
119 vpath = channel->vph->vpath;
120
121 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
122 for (i = 0; i < channel->length; i++)
123 channel->orig_arr[i] = channel->reserve_arr[i];
124 }
125
126 switch (channel->type) {
127 case VXGE_HW_CHANNEL_TYPE_FIFO:
128 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
129 channel->stats = &((struct __vxge_hw_fifo *)
130 channel)->stats->common_stats;
131 break;
132 case VXGE_HW_CHANNEL_TYPE_RING:
133 vpath->ringh = (struct __vxge_hw_ring *)channel;
134 channel->stats = &((struct __vxge_hw_ring *)
135 channel)->stats->common_stats;
136 break;
137 default:
138 break;
139 }
140
141 return VXGE_HW_OK;
142}
143
144/*
145 * __vxge_hw_channel_reset - Resets a channel
146 * This function resets a channel by properly setting the various references
147 */
148enum vxge_hw_status
149__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
150{
151 u32 i;
152
153 for (i = 0; i < channel->length; i++) {
154 if (channel->reserve_arr != NULL)
155 channel->reserve_arr[i] = channel->orig_arr[i];
156 if (channel->free_arr != NULL)
157 channel->free_arr[i] = NULL;
158 if (channel->work_arr != NULL)
159 channel->work_arr[i] = NULL;
160 }
161 channel->free_ptr = channel->length;
162 channel->reserve_ptr = channel->length;
163 channel->reserve_top = 0;
164 channel->post_index = 0;
165 channel->compl_index = 0;
166
167 return VXGE_HW_OK;
168}
169
170/*
171 * __vxge_hw_device_pci_e_init
172 * Initialize certain PCI/PCI-X configuration registers
173 * with recommended values. Save config space for future hw resets.
174 */
175void
176__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
177{
178 u16 cmd = 0;
179
 180 /* Set the PErr Response bit and SERR in PCI command register. */
181 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
182 cmd |= 0x140;
183 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
184
185 pci_save_state(hldev->pdev);
186
187 return;
188}
189
190/*
191 * __vxge_hw_device_register_poll
 192 * Polls the given register for up to the specified amount of time.
 193 * Returns success as soon as the masked bits read back as cleared.
194 */
195enum vxge_hw_status
196__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
197{
198 u64 val64;
199 u32 i = 0;
200 enum vxge_hw_status ret = VXGE_HW_FAIL;
201
202 udelay(10);
203
204 do {
205 val64 = readq(reg);
206 if (!(val64 & mask))
207 return VXGE_HW_OK;
208 udelay(100);
209 } while (++i <= 9);
210
211 i = 0;
212 do {
213 val64 = readq(reg);
214 if (!(val64 & mask))
215 return VXGE_HW_OK;
216 mdelay(1);
217 } while (++i <= max_millis);
218
219 return ret;
220}
221
222 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
223 * in progress
 224 * This routine polls the vpath reset in progress register until it clears to zero
225 */
226enum vxge_hw_status
227__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
228{
229 enum vxge_hw_status status;
230 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
231 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
232 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
233 return status;
234}
235
236/*
237 * __vxge_hw_device_toc_get
238 * This routine sets the swapper and reads the toc pointer and returns the
239 * memory mapped address of the toc
240 */
241struct vxge_hw_toc_reg __iomem *
242__vxge_hw_device_toc_get(void __iomem *bar0)
243{
244 u64 val64;
245 struct vxge_hw_toc_reg __iomem *toc = NULL;
246 enum vxge_hw_status status;
247
248 struct vxge_hw_legacy_reg __iomem *legacy_reg =
249 (struct vxge_hw_legacy_reg __iomem *)bar0;
250
251 status = __vxge_hw_legacy_swapper_set(legacy_reg);
252 if (status != VXGE_HW_OK)
253 goto exit;
254
255 val64 = readq(&legacy_reg->toc_first_pointer);
256 toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
257exit:
258 return toc;
259}
260
261/*
262 * __vxge_hw_device_reg_addr_get
263 * This routine sets the swapper and reads the toc pointer and initializes the
 264 * register location pointers in the device object. It waits until the ric has
265 * completed initializing registers.
266 */
267enum vxge_hw_status
268__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
269{
270 u64 val64;
271 u32 i;
272 enum vxge_hw_status status = VXGE_HW_OK;
273
274 hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
275
276 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
277 if (hldev->toc_reg == NULL) {
278 status = VXGE_HW_FAIL;
279 goto exit;
280 }
281
282 val64 = readq(&hldev->toc_reg->toc_common_pointer);
283 hldev->common_reg =
284 (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
285
286 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
287 hldev->mrpcim_reg =
288 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
289
290 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
291 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
292 hldev->srpcim_reg[i] =
293 (struct vxge_hw_srpcim_reg __iomem *)
294 (hldev->bar0 + val64);
295 }
296
297 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
298 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
299 hldev->vpmgmt_reg[i] =
300 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
301 }
302
303 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
304 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
305 hldev->vpath_reg[i] =
306 (struct vxge_hw_vpath_reg __iomem *)
307 (hldev->bar0 + val64);
308 }
309
310 val64 = readq(&hldev->toc_reg->toc_kdfc);
311
312 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
313 case 0:
314 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
315 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
316 break;
 317 default:
318 break;
319 }
320
321 status = __vxge_hw_device_vpath_reset_in_prog_check(
322 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
323exit:
324 return status;
325}
326
327/*
328 * __vxge_hw_device_id_get
 329 * This routine reads the device id and revision numbers and sets them in the
 330 * device structure
331 */
332void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
333{
334 u64 val64;
335
336 val64 = readq(&hldev->common_reg->titan_asic_id);
337 hldev->device_id =
338 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
339
340 hldev->major_revision =
341 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
342
343 hldev->minor_revision =
344 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
345
346 return;
347}
348
349/*
350 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
351 * This routine returns the Access Rights of the driver
352 */
353static u32
354__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
355{
356 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
357
358 switch (host_type) {
359 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
 360 if (func_id == 0) {
361 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
362 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
363 }
 364 break;
365 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
366 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
367 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
368 break;
369 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
370 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
371 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
372 break;
373 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
374 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
375 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
376 break;
377 case VXGE_HW_SR_VH_FUNCTION0:
378 case VXGE_HW_VH_NORMAL_FUNCTION:
379 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
380 break;
381 }
382
383 return access_rights;
384}
385/*
 386 * __vxge_hw_device_is_privilaged
 387 * This routine checks whether the device function is privileged or not
388 */
389
390enum vxge_hw_status
391__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
392{
393 if (__vxge_hw_device_access_rights_get(host_type,
394 func_id) &
395 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
396 return VXGE_HW_OK;
397 else
398 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
399}
400
401/*
 402 * __vxge_hw_device_host_info_get
403 * This routine returns the host type assignments
404 */
405void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
406{
407 u64 val64;
408 u32 i;
409
410 val64 = readq(&hldev->common_reg->host_type_assignments);
411
412 hldev->host_type =
413 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
414
415 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
416
417 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
418
419 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
420 continue;
421
422 hldev->func_id =
423 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
424
425 hldev->access_rights = __vxge_hw_device_access_rights_get(
426 hldev->host_type, hldev->func_id);
427
428 hldev->first_vp_id = i;
429 break;
430 }
431
432 return;
433}
434
435/*
436 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
437 * link width and signalling rate.
438 */
439static enum vxge_hw_status
440__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
441{
442 int exp_cap;
443 u16 lnk;
444
445 /* Get the negotiated link width and speed from PCI config space */
446 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
447 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
448
449 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
450 return VXGE_HW_ERR_INVALID_PCI_INFO;
451
452 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
453 case PCIE_LNK_WIDTH_RESRV:
454 case PCIE_LNK_X1:
455 case PCIE_LNK_X2:
456 case PCIE_LNK_X4:
457 case PCIE_LNK_X8:
458 break;
459 default:
460 return VXGE_HW_ERR_INVALID_PCI_INFO;
461 }
462
463 return VXGE_HW_OK;
464}
465
 466/*
 467 * __vxge_hw_device_initialize
468 * Initialize Titan-V hardware.
469 */
470enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
471{
472 enum vxge_hw_status status = VXGE_HW_OK;
473
 474 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
475 hldev->func_id)) {
 476 /* Validate the pci-e link width and speed */
477 status = __vxge_hw_verify_pci_e_info(hldev);
478 if (status != VXGE_HW_OK)
479 goto exit;
480 }
 481
 482exit:
483 return status;
484}
485
486/**
487 * vxge_hw_device_hw_info_get - Get the hw information
488 * Returns the vpath mask that has the bits set for each vpath allocated
 489 * for the driver, FW version information and the first mac address for
490 * each vpath
491 */
492enum vxge_hw_status __devinit
493vxge_hw_device_hw_info_get(void __iomem *bar0,
494 struct vxge_hw_device_hw_info *hw_info)
495{
496 u32 i;
497 u64 val64;
498 struct vxge_hw_toc_reg __iomem *toc;
499 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
500 struct vxge_hw_common_reg __iomem *common_reg;
501 struct vxge_hw_vpath_reg __iomem *vpath_reg;
502 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
503 enum vxge_hw_status status;
504
505 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
506
507 toc = __vxge_hw_device_toc_get(bar0);
508 if (toc == NULL) {
509 status = VXGE_HW_ERR_CRITICAL;
510 goto exit;
511 }
512
513 val64 = readq(&toc->toc_common_pointer);
514 common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
515
516 status = __vxge_hw_device_vpath_reset_in_prog_check(
517 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
518 if (status != VXGE_HW_OK)
519 goto exit;
520
521 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
522
523 val64 = readq(&common_reg->host_type_assignments);
524
525 hw_info->host_type =
526 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
527
528 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
529
530 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
531 continue;
532
533 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
534
535 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
536 (bar0 + val64);
537
538 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
539 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
540 hw_info->func_id) &
541 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
542
543 val64 = readq(&toc->toc_mrpcim_pointer);
544
545 mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
546 (bar0 + val64);
547
548 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
549 wmb();
550 }
551
552 val64 = readq(&toc->toc_vpath_pointer[i]);
553
554 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
555
556 hw_info->function_mode =
557 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
558
559 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
560 if (status != VXGE_HW_OK)
561 goto exit;
562
563 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
564 if (status != VXGE_HW_OK)
565 goto exit;
566
567 break;
568 }
569
570 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
571
572 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
573 continue;
574
575 val64 = readq(&toc->toc_vpath_pointer[i]);
576 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
577
578 status = __vxge_hw_vpath_addr_get(i, vpath_reg,
579 hw_info->mac_addrs[i],
580 hw_info->mac_addr_masks[i]);
581 if (status != VXGE_HW_OK)
582 goto exit;
583 }
584exit:
585 return status;
586}
587
588/*
589 * vxge_hw_device_initialize - Initialize Titan device.
590 * Initialize Titan device. Note that all the arguments of this public API
591 * are 'IN', including @hldev. Driver cooperates with
592 * OS to find new Titan device, locate its PCI and memory spaces.
593 *
594 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
595 * to enable the latter to perform Titan hardware initialization.
596 */
597enum vxge_hw_status __devinit
598vxge_hw_device_initialize(
599 struct __vxge_hw_device **devh,
600 struct vxge_hw_device_attr *attr,
601 struct vxge_hw_device_config *device_config)
602{
603 u32 i;
604 u32 nblocks = 0;
605 struct __vxge_hw_device *hldev = NULL;
606 enum vxge_hw_status status = VXGE_HW_OK;
607
608 status = __vxge_hw_device_config_check(device_config);
609 if (status != VXGE_HW_OK)
610 goto exit;
611
612 hldev = (struct __vxge_hw_device *)
613 vmalloc(sizeof(struct __vxge_hw_device));
614 if (hldev == NULL) {
615 status = VXGE_HW_ERR_OUT_OF_MEMORY;
616 goto exit;
617 }
618
619 memset(hldev, 0, sizeof(struct __vxge_hw_device));
620 hldev->magic = VXGE_HW_DEVICE_MAGIC;
621
622 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
623
624 /* apply config */
625 memcpy(&hldev->config, device_config,
626 sizeof(struct vxge_hw_device_config));
627
628 hldev->bar0 = attr->bar0;
 629 hldev->pdev = attr->pdev;
630
631 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
632 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
633 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
634
635 __vxge_hw_device_pci_e_init(hldev);
636
637 status = __vxge_hw_device_reg_addr_get(hldev);
 638 if (status != VXGE_HW_OK) {
 639 vfree(hldev);
 640 goto exit;
 641 }
 642 __vxge_hw_device_id_get(hldev);
643
644 __vxge_hw_device_host_info_get(hldev);
645
646 /* Incrementing for stats blocks */
647 nblocks++;
648
649 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
650
651 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
652 continue;
653
654 if (device_config->vp_config[i].ring.enable ==
655 VXGE_HW_RING_ENABLE)
656 nblocks += device_config->vp_config[i].ring.ring_blocks;
657
658 if (device_config->vp_config[i].fifo.enable ==
659 VXGE_HW_FIFO_ENABLE)
660 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
661 nblocks++;
662 }
663
664 if (__vxge_hw_blockpool_create(hldev,
665 &hldev->block_pool,
666 device_config->dma_blockpool_initial + nblocks,
667 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
668
669 vxge_hw_device_terminate(hldev);
670 status = VXGE_HW_ERR_OUT_OF_MEMORY;
671 goto exit;
672 }
673
674 status = __vxge_hw_device_initialize(hldev);
675
676 if (status != VXGE_HW_OK) {
677 vxge_hw_device_terminate(hldev);
678 goto exit;
679 }
680
681 *devh = hldev;
682exit:
683 return status;
684}
685
686/*
687 * vxge_hw_device_terminate - Terminate Titan device.
688 * Terminate HW device.
689 */
690void
691vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
692{
693 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
694
695 hldev->magic = VXGE_HW_DEVICE_DEAD;
696 __vxge_hw_blockpool_destroy(&hldev->block_pool);
697 vfree(hldev);
698}
699
700/*
701 * vxge_hw_device_stats_get - Get the device hw statistics.
702 * Returns the vpath h/w stats for the device.
703 */
704enum vxge_hw_status
705vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
706 struct vxge_hw_device_stats_hw_info *hw_stats)
707{
708 u32 i;
709 enum vxge_hw_status status = VXGE_HW_OK;
710
711 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
712
713 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
714 (hldev->virtual_paths[i].vp_open ==
715 VXGE_HW_VP_NOT_OPEN))
716 continue;
717
718 memcpy(hldev->virtual_paths[i].hw_stats_sav,
719 hldev->virtual_paths[i].hw_stats,
720 sizeof(struct vxge_hw_vpath_stats_hw_info));
721
722 status = __vxge_hw_vpath_stats_get(
723 &hldev->virtual_paths[i],
724 hldev->virtual_paths[i].hw_stats);
725 }
726
727 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
728 sizeof(struct vxge_hw_device_stats_hw_info));
729
730 return status;
731}
732
733/*
734 * vxge_hw_driver_stats_get - Get the device sw statistics.
735 * Returns the vpath s/w stats for the device.
736 */
737enum vxge_hw_status vxge_hw_driver_stats_get(
738 struct __vxge_hw_device *hldev,
739 struct vxge_hw_device_stats_sw_info *sw_stats)
740{
741 enum vxge_hw_status status = VXGE_HW_OK;
742
743 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
744 sizeof(struct vxge_hw_device_stats_sw_info));
745
746 return status;
747}
748
749/*
750 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
751 * and offset and perform an operation
752 * Get the statistics from the given location and offset.
753 */
754enum vxge_hw_status
755vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
756 u32 operation, u32 location, u32 offset, u64 *stat)
757{
758 u64 val64;
759 enum vxge_hw_status status = VXGE_HW_OK;
760
 761 status = __vxge_hw_device_is_privilaged(hldev->host_type,
 762 hldev->func_id);
 763 if (status != VXGE_HW_OK)
764 goto exit;
765
766 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
767 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
768 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
769 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
770
771 status = __vxge_hw_pio_mem_write64(val64,
772 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
773 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
774 hldev->config.device_poll_millis);
775
776 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
777 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
778 else
779 *stat = 0;
780exit:
781 return status;
782}
783
784/*
785 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
786 * Get the Statistics on aggregate port
787 */
788enum vxge_hw_status
789vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
790 struct vxge_hw_xmac_aggr_stats *aggr_stats)
791{
792 u64 *val64;
793 int i;
794 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
795 enum vxge_hw_status status = VXGE_HW_OK;
796
797 val64 = (u64 *)aggr_stats;
798
 799 status = __vxge_hw_device_is_privilaged(hldev->host_type,
 800 hldev->func_id);
 801 if (status != VXGE_HW_OK)
802 goto exit;
803
804 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
805 status = vxge_hw_mrpcim_stats_access(hldev,
806 VXGE_HW_STATS_OP_READ,
807 VXGE_HW_STATS_LOC_AGGR,
808 ((offset + (104 * port)) >> 3), val64);
809 if (status != VXGE_HW_OK)
810 goto exit;
811
812 offset += 8;
813 val64++;
814 }
815exit:
816 return status;
817}
818
819/*
820 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
821 * Get the Statistics on port
822 */
823enum vxge_hw_status
824vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
825 struct vxge_hw_xmac_port_stats *port_stats)
826{
827 u64 *val64;
828 enum vxge_hw_status status = VXGE_HW_OK;
829 int i;
830 u32 offset = 0x0;
831 val64 = (u64 *) port_stats;
832
 833 status = __vxge_hw_device_is_privilaged(hldev->host_type,
 834 hldev->func_id);
 835 if (status != VXGE_HW_OK)
836 goto exit;
837
838 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
839 status = vxge_hw_mrpcim_stats_access(hldev,
840 VXGE_HW_STATS_OP_READ,
841 VXGE_HW_STATS_LOC_AGGR,
842 ((offset + (608 * port)) >> 3), val64);
843 if (status != VXGE_HW_OK)
844 goto exit;
845
846 offset += 8;
847 val64++;
848 }
849
850exit:
851 return status;
852}
853
854/*
855 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
856 * Get the XMAC Statistics
857 */
858enum vxge_hw_status
859vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
860 struct vxge_hw_xmac_stats *xmac_stats)
861{
862 enum vxge_hw_status status = VXGE_HW_OK;
863 u32 i;
864
865 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
866 0, &xmac_stats->aggr_stats[0]);
867
868 if (status != VXGE_HW_OK)
869 goto exit;
870
871 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
872 1, &xmac_stats->aggr_stats[1]);
873 if (status != VXGE_HW_OK)
874 goto exit;
875
876 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
877
878 status = vxge_hw_device_xmac_port_stats_get(hldev,
879 i, &xmac_stats->port_stats[i]);
880 if (status != VXGE_HW_OK)
881 goto exit;
882 }
883
884 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
885
886 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
887 continue;
888
889 status = __vxge_hw_vpath_xmac_tx_stats_get(
890 &hldev->virtual_paths[i],
891 &xmac_stats->vpath_tx_stats[i]);
892 if (status != VXGE_HW_OK)
893 goto exit;
894
895 status = __vxge_hw_vpath_xmac_rx_stats_get(
896 &hldev->virtual_paths[i],
897 &xmac_stats->vpath_rx_stats[i]);
898 if (status != VXGE_HW_OK)
899 goto exit;
900 }
901exit:
902 return status;
903}
904
905/*
906 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
907 * This routine is used to dynamically change the debug output
908 */
909void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
910 enum vxge_debug_level level, u32 mask)
911{
912 if (hldev == NULL)
913 return;
914
915#if defined(VXGE_DEBUG_TRACE_MASK) || \
916 defined(VXGE_DEBUG_ERR_MASK)
917 hldev->debug_module_mask = mask;
918 hldev->debug_level = level;
919#endif
920
921#if defined(VXGE_DEBUG_ERR_MASK)
922 hldev->level_err = level & VXGE_ERR;
923#endif
924
925#if defined(VXGE_DEBUG_TRACE_MASK)
926 hldev->level_trace = level & VXGE_TRACE;
927#endif
928}
929
930/*
931 * vxge_hw_device_error_level_get - Get the error level
932 * This routine returns the current error level set
933 */
934u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
935{
936#if defined(VXGE_DEBUG_ERR_MASK)
937 if (hldev == NULL)
938 return VXGE_ERR;
939 else
940 return hldev->level_err;
941#else
942 return 0;
943#endif
944}
945
946/*
947 * vxge_hw_device_trace_level_get - Get the trace level
948 * This routine returns the current trace level set
949 */
950u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
951{
952#if defined(VXGE_DEBUG_TRACE_MASK)
953 if (hldev == NULL)
954 return VXGE_TRACE;
955 else
956 return hldev->level_trace;
957#else
958 return 0;
959#endif
960}
961/*
962 * vxge_hw_device_debug_mask_get - Get the debug mask
963 * This routine returns the current debug mask set
964 */
965u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
966{
967#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
968 if (hldev == NULL)
969 return 0;
970 return hldev->debug_module_mask;
971#else
972 return 0;
973#endif
974}
975
976/*
 977 * vxge_hw_device_getpause_data - Pause frame generation and reception.
978 * Returns the Pause frame generation and reception capability of the NIC.
979 */
980enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
981 u32 port, u32 *tx, u32 *rx)
982{
983 u64 val64;
984 enum vxge_hw_status status = VXGE_HW_OK;
985
986 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
987 status = VXGE_HW_ERR_INVALID_DEVICE;
988 goto exit;
989 }
990
991 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
992 status = VXGE_HW_ERR_INVALID_PORT;
993 goto exit;
994 }
995
996 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
997 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
998 goto exit;
999 }
1000
1001 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1002 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1003 *tx = 1;
1004 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1005 *rx = 1;
1006exit:
1007 return status;
1008}
1009
1010/*
1011 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1012 * It can be used to set or reset Pause frame generation or reception
1013 * support of the NIC.
1014 */
1015
1016enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1017 u32 port, u32 tx, u32 rx)
1018{
1019 u64 val64;
1020 enum vxge_hw_status status = VXGE_HW_OK;
1021
1022 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1023 status = VXGE_HW_ERR_INVALID_DEVICE;
1024 goto exit;
1025 }
1026
1027 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1028 status = VXGE_HW_ERR_INVALID_PORT;
1029 goto exit;
1030 }
1031
 1032 status = __vxge_hw_device_is_privilaged(hldev->host_type,
 1033 hldev->func_id);
 1034 if (status != VXGE_HW_OK)
1035 goto exit;
1036
1037 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1038 if (tx)
1039 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1040 else
1041 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1042 if (rx)
1043 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1044 else
1045 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1046
1047 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1048exit:
1049 return status;
1050}
1051
1052u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1053{
1054 int link_width, exp_cap;
1055 u16 lnk;
1056
1057 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1058 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
1059 link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1060 return link_width;
1061}
1062
1063/*
1064 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1065 * This function returns the index of memory block
1066 */
1067static inline u32
1068__vxge_hw_ring_block_memblock_idx(u8 *block)
1069{
1070 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1071}
1072
1073/*
1074 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 1075 * This function sets the memblock index into a memory block
1076 */
1077static inline void
1078__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1079{
1080 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1081}
1082
1083/*
1084 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1085 * in RxD block
1086 * Sets the next block pointer in RxD block
1087 */
1088static inline void
1089__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1090{
1091 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1092}
1093
1094/*
1095 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1096 * first block
1097 * Returns the dma address of the first RxD block
1098 */
1099u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1100{
1101 struct vxge_hw_mempool_dma *dma_object;
1102
1103 dma_object = ring->mempool->memblocks_dma_arr;
1104 vxge_assert(dma_object != NULL);
1105
1106 return dma_object->addr;
1107}
1108
1109/*
1110 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1111 * This function returns the dma address of a given item
1112 */
1113static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1114 void *item)
1115{
1116 u32 memblock_idx;
1117 void *memblock;
1118 struct vxge_hw_mempool_dma *memblock_dma_object;
1119 ptrdiff_t dma_item_offset;
1120
1121 /* get owner memblock index */
1122 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1123
1124 /* get owner memblock by memblock index */
1125 memblock = mempoolh->memblocks_arr[memblock_idx];
1126
1127 /* get memblock DMA object by memblock index */
1128 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1129
1130 /* calculate offset in the memblock of this item */
1131 dma_item_offset = (u8 *)item - (u8 *)memblock;
1132
1133 return memblock_dma_object->addr + dma_item_offset;
1134}
1135
1136/*
1137 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 1138 * This function links one RxD block to the next by setting its next-block pointer
1139 */
1140static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1141 struct __vxge_hw_ring *ring, u32 from,
1142 u32 to)
1143{
1144 u8 *to_item , *from_item;
1145 dma_addr_t to_dma;
1146
1147 /* get "from" RxD block */
1148 from_item = mempoolh->items_arr[from];
1149 vxge_assert(from_item);
1150
1151 /* get "to" RxD block */
1152 to_item = mempoolh->items_arr[to];
1153 vxge_assert(to_item);
1154
1155 /* return address of the beginning of previous RxD block */
1156 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1157
1158 /* set next pointer for this RxD block to point on
1159 * previous item's DMA start address */
1160 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1161}
1162
1163/*
1164 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
1165 * block callback
 1166 * This function is the callback passed to __vxge_hw_mempool_create to create memory
1167 * pool for RxD block
1168 */
1169static void
1170__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1171 u32 memblock_index,
1172 struct vxge_hw_mempool_dma *dma_object,
1173 u32 index, u32 is_last)
1174{
1175 u32 i;
1176 void *item = mempoolh->items_arr[index];
1177 struct __vxge_hw_ring *ring =
1178 (struct __vxge_hw_ring *)mempoolh->userdata;
1179
1180 /* format rxds array */
1181 for (i = 0; i < ring->rxds_per_block; i++) {
1182 void *rxdblock_priv;
1183 void *uld_priv;
1184 struct vxge_hw_ring_rxd_1 *rxdp;
1185
1186 u32 reserve_index = ring->channel.reserve_ptr -
1187 (index * ring->rxds_per_block + i + 1);
1188 u32 memblock_item_idx;
1189
1190 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1191 i * ring->rxd_size;
1192
1193 /* Note: memblock_item_idx is index of the item within
1194 * the memblock. For instance, in case of three RxD-blocks
1195 * per memblock this value can be 0, 1 or 2. */
1196 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1197 memblock_index, item,
1198 &memblock_item_idx);
1199
1200 rxdp = (struct vxge_hw_ring_rxd_1 *)
1201 ring->channel.reserve_arr[reserve_index];
1202
1203 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1204
1205 /* pre-format Host_Control */
1206 rxdp->host_control = (u64)(size_t)uld_priv;
1207 }
1208
1209 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1210
1211 if (is_last) {
1212 /* link last one with first one */
1213 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1214 }
1215
1216 if (index > 0) {
1217 /* link this RxD block with previous one */
1218 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1219 }
1220
1221 return;
1222}
1223
1224/*
 1225 * __vxge_hw_ring_replenish - Initial replenish of RxDs
 1226 * This function replenishes the RxDs from reserve array to work array
1227 */
1228enum vxge_hw_status
 1229vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
 1230{
 1231 void *rxd;
 1232 struct __vxge_hw_channel *channel;
1233 enum vxge_hw_status status = VXGE_HW_OK;
1234
1235 channel = &ring->channel;
1236
1237 while (vxge_hw_channel_dtr_count(channel) > 0) {
1238
1239 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1240
1241 vxge_assert(status == VXGE_HW_OK);
1242
1243 if (ring->rxd_init) {
1244 status = ring->rxd_init(rxd, channel->userdata);
1245 if (status != VXGE_HW_OK) {
1246 vxge_hw_ring_rxd_free(ring, rxd);
1247 goto exit;
1248 }
1249 }
1250
1251 vxge_hw_ring_rxd_post(ring, rxd);
 1252 }
1253 status = VXGE_HW_OK;
1254exit:
1255 return status;
1256}
1257
1258/*
1259 * __vxge_hw_ring_create - Create a Ring
1260 * This function creates Ring and initializes it.
1261 *
1262 */
1263enum vxge_hw_status
1264__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1265 struct vxge_hw_ring_attr *attr)
1266{
1267 enum vxge_hw_status status = VXGE_HW_OK;
1268 struct __vxge_hw_ring *ring;
1269 u32 ring_length;
1270 struct vxge_hw_ring_config *config;
1271 struct __vxge_hw_device *hldev;
1272 u32 vp_id;
1273 struct vxge_hw_mempool_cbs ring_mp_callback;
1274
1275 if ((vp == NULL) || (attr == NULL)) {
1276 status = VXGE_HW_FAIL;
1277 goto exit;
1278 }
1279
1280 hldev = vp->vpath->hldev;
1281 vp_id = vp->vpath->vp_id;
1282
1283 config = &hldev->config.vp_config[vp_id].ring;
1284
1285 ring_length = config->ring_blocks *
1286 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1287
1288 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1289 VXGE_HW_CHANNEL_TYPE_RING,
1290 ring_length,
1291 attr->per_rxd_space,
1292 attr->userdata);
1293
1294 if (ring == NULL) {
1295 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1296 goto exit;
1297 }
1298
1299 vp->vpath->ringh = ring;
1300 ring->vp_id = vp_id;
1301 ring->vp_reg = vp->vpath->vp_reg;
1302 ring->common_reg = hldev->common_reg;
1303 ring->stats = &vp->vpath->sw_stats->ring_stats;
1304 ring->config = config;
1305 ring->callback = attr->callback;
1306 ring->rxd_init = attr->rxd_init;
1307 ring->rxd_term = attr->rxd_term;
1308 ring->buffer_mode = config->buffer_mode;
1309 ring->rxds_limit = config->rxds_limit;
1310
1311 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1312 ring->rxd_priv_size =
1313 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1314 ring->per_rxd_space = attr->per_rxd_space;
1315
1316 ring->rxd_priv_size =
1317 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1318 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1319
1320 /* how many RxDs can fit into one block. Depends on configured
1321 * buffer_mode. */
1322 ring->rxds_per_block =
1323 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1324
1325 /* calculate actual RxD block private size */
1326 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1327 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1328 ring->mempool = __vxge_hw_mempool_create(hldev,
1329 VXGE_HW_BLOCK_SIZE,
1330 VXGE_HW_BLOCK_SIZE,
1331 ring->rxdblock_priv_size,
1332 ring->config->ring_blocks,
1333 ring->config->ring_blocks,
1334 &ring_mp_callback,
1335 ring);
1336
1337 if (ring->mempool == NULL) {
1338 __vxge_hw_ring_delete(vp);
1339 return VXGE_HW_ERR_OUT_OF_MEMORY;
1340 }
1341
1342 status = __vxge_hw_channel_initialize(&ring->channel);
1343 if (status != VXGE_HW_OK) {
1344 __vxge_hw_ring_delete(vp);
1345 goto exit;
1346 }
1347
1348 /* Note:
1349 * Specifying rxd_init callback means two things:
1350 * 1) rxds need to be initialized by driver at channel-open time;
1351 * 2) rxds need to be posted at channel-open time
1352 * (that's what the initial_replenish() below does)
1353 * Currently we don't have a case when the 1) is done without the 2).
1354 */
1355 if (ring->rxd_init) {
 1356 status = vxge_hw_ring_replenish(ring);
 1357 if (status != VXGE_HW_OK) {
1358 __vxge_hw_ring_delete(vp);
1359 goto exit;
1360 }
1361 }
1362
1363 /* initial replenish will increment the counter in its post() routine,
1364 * we have to reset it */
1365 ring->stats->common_stats.usage_cnt = 0;
1366exit:
1367 return status;
1368}
1369
1370/*
1371 * __vxge_hw_ring_abort - Returns the RxD
1372 * This function terminates the RxDs of ring
1373 */
1374enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1375{
1376 void *rxdh;
1377 struct __vxge_hw_channel *channel;
1378
1379 channel = &ring->channel;
1380
1381 for (;;) {
1382 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1383
1384 if (rxdh == NULL)
1385 break;
1386
1387 vxge_hw_channel_dtr_complete(channel);
1388
1389 if (ring->rxd_term)
1390 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1391 channel->userdata);
1392
1393 vxge_hw_channel_dtr_free(channel, rxdh);
1394 }
1395
1396 return VXGE_HW_OK;
1397}
1398
1399/*
1400 * __vxge_hw_ring_reset - Resets the ring
1401 * This function resets the ring during vpath reset operation
1402 */
1403enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1404{
1405 enum vxge_hw_status status = VXGE_HW_OK;
1406 struct __vxge_hw_channel *channel;
1407
1408 channel = &ring->channel;
1409
1410 __vxge_hw_ring_abort(ring);
1411
1412 status = __vxge_hw_channel_reset(channel);
1413
1414 if (status != VXGE_HW_OK)
1415 goto exit;
1416
1417 if (ring->rxd_init) {
 1418 status = vxge_hw_ring_replenish(ring);
 1419 if (status != VXGE_HW_OK)
1420 goto exit;
1421 }
1422exit:
1423 return status;
1424}
1425
1426/*
1427 * __vxge_hw_ring_delete - Removes the ring
 1428 * This function frees up the memory pool and removes the ring
1429 */
1430enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1431{
1432 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1433
1434 __vxge_hw_ring_abort(ring);
1435
1436 if (ring->mempool)
1437 __vxge_hw_mempool_destroy(ring->mempool);
1438
1439 vp->vpath->ringh = NULL;
1440 __vxge_hw_channel_free(&ring->channel);
1441
1442 return VXGE_HW_OK;
1443}
1444
1445/*
1446 * __vxge_hw_mempool_grow
1447 * Will resize mempool up to %num_allocate value.
1448 */
1449enum vxge_hw_status
1450__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1451 u32 *num_allocated)
1452{
1453 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1454 u32 n_items = mempool->items_per_memblock;
1455 u32 start_block_idx = mempool->memblocks_allocated;
1456 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1457 enum vxge_hw_status status = VXGE_HW_OK;
1458
1459 *num_allocated = 0;
1460
1461 if (end_block_idx > mempool->memblocks_max) {
1462 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1463 goto exit;
1464 }
1465
1466 for (i = start_block_idx; i < end_block_idx; i++) {
1467 u32 j;
1468 u32 is_last = ((end_block_idx - 1) == i);
1469 struct vxge_hw_mempool_dma *dma_object =
1470 mempool->memblocks_dma_arr + i;
1471 void *the_memblock;
1472
1473 /* allocate memblock's private part. Each DMA memblock
1474 * has a space allocated for item's private usage upon
1475 * mempool's user request. Each time mempool grows, it will
1476 * allocate new memblock and its private part at once.
1477 * This helps to minimize memory usage a lot. */
1478 mempool->memblocks_priv_arr[i] =
1479 vmalloc(mempool->items_priv_size * n_items);
1480 if (mempool->memblocks_priv_arr[i] == NULL) {
1481 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1482 goto exit;
1483 }
1484
1485 memset(mempool->memblocks_priv_arr[i], 0,
1486 mempool->items_priv_size * n_items);
1487
1488 /* allocate DMA-capable memblock */
1489 mempool->memblocks_arr[i] =
1490 __vxge_hw_blockpool_malloc(mempool->devh,
1491 mempool->memblock_size, dma_object);
1492 if (mempool->memblocks_arr[i] == NULL) {
1493 vfree(mempool->memblocks_priv_arr[i]);
1494 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1495 goto exit;
1496 }
1497
1498 (*num_allocated)++;
1499 mempool->memblocks_allocated++;
1500
1501 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1502
1503 the_memblock = mempool->memblocks_arr[i];
1504
1505 /* fill the items hash array */
1506 for (j = 0; j < n_items; j++) {
1507 u32 index = i * n_items + j;
1508
1509 if (first_time && index >= mempool->items_initial)
1510 break;
1511
1512 mempool->items_arr[index] =
1513 ((char *)the_memblock + j*mempool->item_size);
1514
1515 /* let caller to do more job on each item */
1516 if (mempool->item_func_alloc != NULL)
1517 mempool->item_func_alloc(mempool, i,
1518 dma_object, index, is_last);
1519
1520 mempool->items_current = index + 1;
1521 }
1522
1523 if (first_time && mempool->items_current ==
1524 mempool->items_initial)
1525 break;
1526 }
1527exit:
1528 return status;
1529}
1530
1531/*
1532 * vxge_hw_mempool_create
1533 * This function will create memory pool object. Pool may grow but will
1534 * never shrink. Pool consists of number of dynamically allocated blocks
1535 * with size enough to hold %items_initial number of items. Memory is
1536 * DMA-able but client must map/unmap before interoperating with the device.
1537 */
1538struct vxge_hw_mempool*
1539__vxge_hw_mempool_create(
1540 struct __vxge_hw_device *devh,
1541 u32 memblock_size,
1542 u32 item_size,
1543 u32 items_priv_size,
1544 u32 items_initial,
1545 u32 items_max,
1546 struct vxge_hw_mempool_cbs *mp_callback,
1547 void *userdata)
1548{
1549 enum vxge_hw_status status = VXGE_HW_OK;
1550 u32 memblocks_to_allocate;
1551 struct vxge_hw_mempool *mempool = NULL;
1552 u32 allocated;
1553
1554 if (memblock_size < item_size) {
1555 status = VXGE_HW_FAIL;
1556 goto exit;
1557 }
1558
1559 mempool = (struct vxge_hw_mempool *)
1560 vmalloc(sizeof(struct vxge_hw_mempool));
1561 if (mempool == NULL) {
1562 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1563 goto exit;
1564 }
1565 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1566
1567 mempool->devh = devh;
1568 mempool->memblock_size = memblock_size;
1569 mempool->items_max = items_max;
1570 mempool->items_initial = items_initial;
1571 mempool->item_size = item_size;
1572 mempool->items_priv_size = items_priv_size;
1573 mempool->item_func_alloc = mp_callback->item_func_alloc;
1574 mempool->userdata = userdata;
1575
1576 mempool->memblocks_allocated = 0;
1577
1578 mempool->items_per_memblock = memblock_size / item_size;
1579
1580 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1581 mempool->items_per_memblock;
1582
1583 /* allocate array of memblocks */
1584 mempool->memblocks_arr =
1585 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1586 if (mempool->memblocks_arr == NULL) {
1587 __vxge_hw_mempool_destroy(mempool);
1588 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1589 mempool = NULL;
1590 goto exit;
1591 }
1592 memset(mempool->memblocks_arr, 0,
1593 sizeof(void *) * mempool->memblocks_max);
1594
1595 /* allocate array of private parts of items per memblocks */
1596 mempool->memblocks_priv_arr =
1597 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1598 if (mempool->memblocks_priv_arr == NULL) {
1599 __vxge_hw_mempool_destroy(mempool);
1600 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1601 mempool = NULL;
1602 goto exit;
1603 }
1604 memset(mempool->memblocks_priv_arr, 0,
1605 sizeof(void *) * mempool->memblocks_max);
1606
1607 /* allocate array of memblocks DMA objects */
1608 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1609 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1610 mempool->memblocks_max);
1611
1612 if (mempool->memblocks_dma_arr == NULL) {
1613 __vxge_hw_mempool_destroy(mempool);
1614 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1615 mempool = NULL;
1616 goto exit;
1617 }
1618 memset(mempool->memblocks_dma_arr, 0,
1619 sizeof(struct vxge_hw_mempool_dma) *
1620 mempool->memblocks_max);
1621
1622 /* allocate hash array of items */
1623 mempool->items_arr =
1624 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1625 if (mempool->items_arr == NULL) {
1626 __vxge_hw_mempool_destroy(mempool);
1627 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1628 mempool = NULL;
1629 goto exit;
1630 }
1631 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1632
1633 /* calculate initial number of memblocks */
1634 memblocks_to_allocate = (mempool->items_initial +
1635 mempool->items_per_memblock - 1) /
1636 mempool->items_per_memblock;
1637
1638 /* pre-allocate the mempool */
1639 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1640 &allocated);
1641 if (status != VXGE_HW_OK) {
1642 __vxge_hw_mempool_destroy(mempool);
1643 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1644 mempool = NULL;
1645 goto exit;
1646 }
1647
1648exit:
1649 return mempool;
1650}
1651
1652/*
1653 * vxge_hw_mempool_destroy
1654 */
1655void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1656{
1657 u32 i, j;
1658 struct __vxge_hw_device *devh = mempool->devh;
1659
1660 for (i = 0; i < mempool->memblocks_allocated; i++) {
1661 struct vxge_hw_mempool_dma *dma_object;
1662
1663 vxge_assert(mempool->memblocks_arr[i]);
1664 vxge_assert(mempool->memblocks_dma_arr + i);
1665
1666 dma_object = mempool->memblocks_dma_arr + i;
1667
1668 for (j = 0; j < mempool->items_per_memblock; j++) {
1669 u32 index = i * mempool->items_per_memblock + j;
1670
1671 /* to skip last partially filled(if any) memblock */
1672 if (index >= mempool->items_current)
1673 break;
1674 }
1675
1676 vfree(mempool->memblocks_priv_arr[i]);
1677
1678 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1679 mempool->memblock_size, dma_object);
1680 }
1681
 1682 vfree(mempool->items_arr);
 1683
 1684 vfree(mempool->memblocks_dma_arr);
 1685
 1686 vfree(mempool->memblocks_priv_arr);
 1687
 1688 vfree(mempool->memblocks_arr);
 1689
1690 vfree(mempool);
1691}
1692
1693/*
1694 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1695 * Check the fifo configuration
1696 */
1697enum vxge_hw_status
1698__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1699{
1700 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1701 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1702 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1703
1704 return VXGE_HW_OK;
1705}
1706
1707/*
1708 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1709 * Check the vpath configuration
1710 */
1711enum vxge_hw_status
1712__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1713{
1714 enum vxge_hw_status status;
1715
1716 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1717 (vp_config->min_bandwidth >
1718 VXGE_HW_VPATH_BANDWIDTH_MAX))
1719 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1720
1721 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1722 if (status != VXGE_HW_OK)
1723 return status;
1724
1725 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1726 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1727 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1728 return VXGE_HW_BADCFG_VPATH_MTU;
1729
1730 if ((vp_config->rpa_strip_vlan_tag !=
1731 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1732 (vp_config->rpa_strip_vlan_tag !=
1733 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1734 (vp_config->rpa_strip_vlan_tag !=
1735 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1736 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1737
1738 return VXGE_HW_OK;
1739}
1740
1741/*
1742 * __vxge_hw_device_config_check - Check device configuration.
1743 * Check the device configuration
1744 */
1745enum vxge_hw_status
1746__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1747{
1748 u32 i;
1749 enum vxge_hw_status status;
1750
1751 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1752 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1753 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1754 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1755 return VXGE_HW_BADCFG_INTR_MODE;
1756
1757 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1758 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1759 return VXGE_HW_BADCFG_RTS_MAC_EN;
1760
1761 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1762 status = __vxge_hw_device_vpath_config_check(
1763 &new_config->vp_config[i]);
1764 if (status != VXGE_HW_OK)
1765 return status;
1766 }
1767
1768 return VXGE_HW_OK;
1769}
1770
1771/*
1772 * vxge_hw_device_config_default_get - Initialize device config with defaults.
1773 * Initialize Titan device config with default values.
1774 */
1775enum vxge_hw_status __devinit
1776vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1777{
1778 u32 i;
1779
1780 device_config->dma_blockpool_initial =
1781 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1782 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1783 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1784 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1785 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1786 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1787 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
1788
1789 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1790
1791 device_config->vp_config[i].vp_id = i;
1792
1793 device_config->vp_config[i].min_bandwidth =
1794 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1795
1796 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1797
1798 device_config->vp_config[i].ring.ring_blocks =
1799 VXGE_HW_DEF_RING_BLOCKS;
1800
1801 device_config->vp_config[i].ring.buffer_mode =
1802 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1803
1804 device_config->vp_config[i].ring.scatter_mode =
1805 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1806
1807 device_config->vp_config[i].ring.rxds_limit =
1808 VXGE_HW_DEF_RING_RXDS_LIMIT;
1809
1810 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1811
1812 device_config->vp_config[i].fifo.fifo_blocks =
1813 VXGE_HW_MIN_FIFO_BLOCKS;
1814
1815 device_config->vp_config[i].fifo.max_frags =
1816 VXGE_HW_MAX_FIFO_FRAGS;
1817
1818 device_config->vp_config[i].fifo.memblock_size =
1819 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1820
1821 device_config->vp_config[i].fifo.alignment_size =
1822 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1823
1824 device_config->vp_config[i].fifo.intr =
1825 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1826
1827 device_config->vp_config[i].fifo.no_snoop_bits =
1828 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1829 device_config->vp_config[i].tti.intr_enable =
1830 VXGE_HW_TIM_INTR_DEFAULT;
1831
1832 device_config->vp_config[i].tti.btimer_val =
1833 VXGE_HW_USE_FLASH_DEFAULT;
1834
1835 device_config->vp_config[i].tti.timer_ac_en =
1836 VXGE_HW_USE_FLASH_DEFAULT;
1837
1838 device_config->vp_config[i].tti.timer_ci_en =
1839 VXGE_HW_USE_FLASH_DEFAULT;
1840
1841 device_config->vp_config[i].tti.timer_ri_en =
1842 VXGE_HW_USE_FLASH_DEFAULT;
1843
1844 device_config->vp_config[i].tti.rtimer_val =
1845 VXGE_HW_USE_FLASH_DEFAULT;
1846
1847 device_config->vp_config[i].tti.util_sel =
1848 VXGE_HW_USE_FLASH_DEFAULT;
1849
1850 device_config->vp_config[i].tti.ltimer_val =
1851 VXGE_HW_USE_FLASH_DEFAULT;
1852
1853 device_config->vp_config[i].tti.urange_a =
1854 VXGE_HW_USE_FLASH_DEFAULT;
1855
1856 device_config->vp_config[i].tti.uec_a =
1857 VXGE_HW_USE_FLASH_DEFAULT;
1858
1859 device_config->vp_config[i].tti.urange_b =
1860 VXGE_HW_USE_FLASH_DEFAULT;
1861
1862 device_config->vp_config[i].tti.uec_b =
1863 VXGE_HW_USE_FLASH_DEFAULT;
1864
1865 device_config->vp_config[i].tti.urange_c =
1866 VXGE_HW_USE_FLASH_DEFAULT;
1867
1868 device_config->vp_config[i].tti.uec_c =
1869 VXGE_HW_USE_FLASH_DEFAULT;
1870
1871 device_config->vp_config[i].tti.uec_d =
1872 VXGE_HW_USE_FLASH_DEFAULT;
1873
1874 device_config->vp_config[i].rti.intr_enable =
1875 VXGE_HW_TIM_INTR_DEFAULT;
1876
1877 device_config->vp_config[i].rti.btimer_val =
1878 VXGE_HW_USE_FLASH_DEFAULT;
1879
1880 device_config->vp_config[i].rti.timer_ac_en =
1881 VXGE_HW_USE_FLASH_DEFAULT;
1882
1883 device_config->vp_config[i].rti.timer_ci_en =
1884 VXGE_HW_USE_FLASH_DEFAULT;
1885
1886 device_config->vp_config[i].rti.timer_ri_en =
1887 VXGE_HW_USE_FLASH_DEFAULT;
1888
1889 device_config->vp_config[i].rti.rtimer_val =
1890 VXGE_HW_USE_FLASH_DEFAULT;
1891
1892 device_config->vp_config[i].rti.util_sel =
1893 VXGE_HW_USE_FLASH_DEFAULT;
1894
1895 device_config->vp_config[i].rti.ltimer_val =
1896 VXGE_HW_USE_FLASH_DEFAULT;
1897
1898 device_config->vp_config[i].rti.urange_a =
1899 VXGE_HW_USE_FLASH_DEFAULT;
1900
1901 device_config->vp_config[i].rti.uec_a =
1902 VXGE_HW_USE_FLASH_DEFAULT;
1903
1904 device_config->vp_config[i].rti.urange_b =
1905 VXGE_HW_USE_FLASH_DEFAULT;
1906
1907 device_config->vp_config[i].rti.uec_b =
1908 VXGE_HW_USE_FLASH_DEFAULT;
1909
1910 device_config->vp_config[i].rti.urange_c =
1911 VXGE_HW_USE_FLASH_DEFAULT;
1912
1913 device_config->vp_config[i].rti.uec_c =
1914 VXGE_HW_USE_FLASH_DEFAULT;
1915
1916 device_config->vp_config[i].rti.uec_d =
1917 VXGE_HW_USE_FLASH_DEFAULT;
1918
1919 device_config->vp_config[i].mtu =
1920 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
1921
1922 device_config->vp_config[i].rpa_strip_vlan_tag =
1923 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
1924 }
1925
1926 return VXGE_HW_OK;
1927}
1928
1929/*
 1930 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 1931 * Set the swapper bits appropriately for the legacy section.
1932 */
1933enum vxge_hw_status
1934__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1935{
1936 u64 val64;
1937 enum vxge_hw_status status = VXGE_HW_OK;
1938
1939 val64 = readq(&legacy_reg->toc_swapper_fb);
1940
1941 wmb();
1942
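	/*
	 * toc_swapper_fb holds a known pattern; the value read back tells us
	 * whether PCI accesses from this host need byte swapping, bit
	 * flipping, both, or neither before the legacy registers are usable.
	 */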
1943 switch (val64) {
1944
1945 case VXGE_HW_SWAPPER_INITIAL_VALUE:
1946 return status;
1947
1948 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
1949 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1950 &legacy_reg->pifm_rd_swap_en);
1951 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1952 &legacy_reg->pifm_rd_flip_en);
1953 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1954 &legacy_reg->pifm_wr_swap_en);
1955 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1956 &legacy_reg->pifm_wr_flip_en);
1957 break;
1958
1959 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
1960 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1961 &legacy_reg->pifm_rd_swap_en);
1962 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1963 &legacy_reg->pifm_wr_swap_en);
1964 break;
1965
1966 case VXGE_HW_SWAPPER_BIT_FLIPPED:
1967 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1968 &legacy_reg->pifm_rd_flip_en);
1969 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1970 &legacy_reg->pifm_wr_flip_en);
1971 break;
1972 }
1973
1974 wmb();
1975
1976 val64 = readq(&legacy_reg->toc_swapper_fb);
1977
1978 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
1979 status = VXGE_HW_ERR_SWAPPER_CTRL;
1980
1981 return status;
1982}
1983
1984/*
1985 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1986 * Set the swapper bits appropriately for the vpath.
1987 */
1988enum vxge_hw_status
1989__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1990{
1991#ifndef __BIG_ENDIAN
1992 u64 val64;
1993
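	/* Little-endian host: turn on the vpath byte-swap control so the
	 * host and the (big-endian) adapter agree on 64-bit data layout. */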
1994 val64 = readq(&vpath_reg->vpath_general_cfg1);
1995 wmb();
1996 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
1997 writeq(val64, &vpath_reg->vpath_general_cfg1);
1998 wmb();
1999#endif
2000 return VXGE_HW_OK;
2001}
2002
2003/*
2004 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2005 * Set the swapper bits appropriately for the vpath.
2006 */
2007enum vxge_hw_status
2008__vxge_hw_kdfc_swapper_set(
2009 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2010 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2011{
2012 u64 val64;
2013
2014 val64 = readq(&legacy_reg->pifm_wr_swap_en);
2015
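	/* Mirror the legacy write-swap setting onto the per-vpath KDFC
	 * doorbell FIFOs so descriptor doorbell writes are swapped the same
	 * way as other writes. */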
2016 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2017 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2018 wmb();
2019
2020 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2021 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
2022 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2023
2024 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2025 wmb();
2026 }
2027
2028 return VXGE_HW_OK;
2029}
2030
2031/*
2032 * vxge_hw_mgmt_device_config - Retrieve device configuration.
 2033 * Get device configuration. Permits retrieving, at run time, the configuration
2034 * values that were used to initialize and configure the device.
2035 */
2036enum vxge_hw_status
2037vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2038 struct vxge_hw_device_config *dev_config, int size)
2039{
2040
2041 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2042 return VXGE_HW_ERR_INVALID_DEVICE;
2043
2044 if (size != sizeof(struct vxge_hw_device_config))
2045 return VXGE_HW_ERR_VERSION_CONFLICT;
2046
2047 memcpy(dev_config, &hldev->config,
2048 sizeof(struct vxge_hw_device_config));
2049
2050 return VXGE_HW_OK;
2051}
2052
2053/*
2054 * vxge_hw_mgmt_reg_read - Read Titan register.
2055 */
2056enum vxge_hw_status
2057vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2058 enum vxge_hw_mgmt_reg_type type,
2059 u32 index, u32 offset, u64 *value)
2060{
2061 enum vxge_hw_status status = VXGE_HW_OK;
2062
2063 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2064 status = VXGE_HW_ERR_INVALID_DEVICE;
2065 goto exit;
2066 }
2067
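	/* Every case below rejects offsets that would let the 8-byte readq()
	 * run past the end of the selected register block. */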
2068 switch (type) {
2069 case vxge_hw_mgmt_reg_type_legacy:
2070 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2071 status = VXGE_HW_ERR_INVALID_OFFSET;
2072 break;
2073 }
2074 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2075 break;
2076 case vxge_hw_mgmt_reg_type_toc:
2077 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2078 status = VXGE_HW_ERR_INVALID_OFFSET;
2079 break;
2080 }
2081 *value = readq((void __iomem *)hldev->toc_reg + offset);
2082 break;
2083 case vxge_hw_mgmt_reg_type_common:
2084 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2085 status = VXGE_HW_ERR_INVALID_OFFSET;
2086 break;
2087 }
2088 *value = readq((void __iomem *)hldev->common_reg + offset);
2089 break;
2090 case vxge_hw_mgmt_reg_type_mrpcim:
2091 if (!(hldev->access_rights &
2092 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2093 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2094 break;
2095 }
2096 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2097 status = VXGE_HW_ERR_INVALID_OFFSET;
2098 break;
2099 }
2100 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2101 break;
2102 case vxge_hw_mgmt_reg_type_srpcim:
2103 if (!(hldev->access_rights &
2104 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2105 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2106 break;
2107 }
2108 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2109 status = VXGE_HW_ERR_INVALID_INDEX;
2110 break;
2111 }
2112 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2113 status = VXGE_HW_ERR_INVALID_OFFSET;
2114 break;
2115 }
2116 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2117 offset);
2118 break;
2119 case vxge_hw_mgmt_reg_type_vpmgmt:
2120 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2121 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2122 status = VXGE_HW_ERR_INVALID_INDEX;
2123 break;
2124 }
2125 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2126 status = VXGE_HW_ERR_INVALID_OFFSET;
2127 break;
2128 }
2129 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2130 offset);
2131 break;
2132 case vxge_hw_mgmt_reg_type_vpath:
2133 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2134 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2135 status = VXGE_HW_ERR_INVALID_INDEX;
2136 break;
2137 }
2138 if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
2139 status = VXGE_HW_ERR_INVALID_INDEX;
2140 break;
2141 }
2142 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2143 status = VXGE_HW_ERR_INVALID_OFFSET;
2144 break;
2145 }
2146 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2147 offset);
2148 break;
2149 default:
2150 status = VXGE_HW_ERR_INVALID_TYPE;
2151 break;
2152 }
2153
2154exit:
2155 return status;
2156}
2157
2158/*
 2159 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2160 */
2161enum vxge_hw_status
2162vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2163{
2164 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
2165 enum vxge_hw_status status = VXGE_HW_OK;
2166 int i = 0, j = 0;
2167
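	/* Report failure if FCS stripping is enabled on any MAC port
	 * visible to an assigned vpath. */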
2168 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2169 if (!((vpath_mask) & vxge_mBIT(i)))
2170 continue;
2171 vpmgmt_reg = hldev->vpmgmt_reg[i];
2172 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2173 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2174 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2175 return VXGE_HW_FAIL;
2176 }
2177 }
2178 return status;
2179}
2180/*
 2181 * vxge_hw_mgmt_reg_write - Write Titan register.
2182 */
2183enum vxge_hw_status
2184vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2185 enum vxge_hw_mgmt_reg_type type,
2186 u32 index, u32 offset, u64 value)
2187{
2188 enum vxge_hw_status status = VXGE_HW_OK;
2189
2190 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2191 status = VXGE_HW_ERR_INVALID_DEVICE;
2192 goto exit;
2193 }
2194
2195 switch (type) {
2196 case vxge_hw_mgmt_reg_type_legacy:
2197 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2198 status = VXGE_HW_ERR_INVALID_OFFSET;
2199 break;
2200 }
2201 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2202 break;
2203 case vxge_hw_mgmt_reg_type_toc:
2204 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2205 status = VXGE_HW_ERR_INVALID_OFFSET;
2206 break;
2207 }
2208 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2209 break;
2210 case vxge_hw_mgmt_reg_type_common:
2211 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2212 status = VXGE_HW_ERR_INVALID_OFFSET;
2213 break;
2214 }
2215 writeq(value, (void __iomem *)hldev->common_reg + offset);
2216 break;
2217 case vxge_hw_mgmt_reg_type_mrpcim:
2218 if (!(hldev->access_rights &
2219 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2220 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2221 break;
2222 }
2223 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2224 status = VXGE_HW_ERR_INVALID_OFFSET;
2225 break;
2226 }
2227 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2228 break;
2229 case vxge_hw_mgmt_reg_type_srpcim:
2230 if (!(hldev->access_rights &
2231 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2232 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2233 break;
2234 }
2235 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2236 status = VXGE_HW_ERR_INVALID_INDEX;
2237 break;
2238 }
2239 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2240 status = VXGE_HW_ERR_INVALID_OFFSET;
2241 break;
2242 }
2243 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2244 offset);
2245
2246 break;
2247 case vxge_hw_mgmt_reg_type_vpmgmt:
2248 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2249 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2250 status = VXGE_HW_ERR_INVALID_INDEX;
2251 break;
2252 }
2253 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2254 status = VXGE_HW_ERR_INVALID_OFFSET;
2255 break;
2256 }
2257 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2258 offset);
2259 break;
2260 case vxge_hw_mgmt_reg_type_vpath:
2261 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2262 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2263 status = VXGE_HW_ERR_INVALID_INDEX;
2264 break;
2265 }
2266 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2267 status = VXGE_HW_ERR_INVALID_OFFSET;
2268 break;
2269 }
2270 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2271 offset);
2272 break;
2273 default:
2274 status = VXGE_HW_ERR_INVALID_TYPE;
2275 break;
2276 }
2277exit:
2278 return status;
2279}
2280
2281/*
2282 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2283 * list callback
 2284 * This function is the callback passed to __vxge_hw_mempool_create to create
 2285 * the memory pool for the TxD list
2286 */
2287static void
2288__vxge_hw_fifo_mempool_item_alloc(
2289 struct vxge_hw_mempool *mempoolh,
2290 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2291 u32 index, u32 is_last)
2292{
2293 u32 memblock_item_idx;
2294 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2295 struct vxge_hw_fifo_txd *txdp =
2296 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2297 struct __vxge_hw_fifo *fifo =
2298 (struct __vxge_hw_fifo *)mempoolh->userdata;
2299 void *memblock = mempoolh->memblocks_arr[memblock_index];
2300
2301 vxge_assert(txdp);
2302
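	/* Remember where this TxDL's private area lives by stashing the
	 * pointer in the descriptor's host_control field. */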
2303 txdp->host_control = (u64) (size_t)
2304 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2305 &memblock_item_idx);
2306
2307 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2308
2309 vxge_assert(txdl_priv);
2310
2311 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2312
2313 /* pre-format HW's TxDL's private */
2314 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2315 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2316 txdl_priv->dma_handle = dma_object->handle;
2317 txdl_priv->memblock = memblock;
2318 txdl_priv->first_txdp = txdp;
2319 txdl_priv->next_txdl_priv = NULL;
2320 txdl_priv->alloc_frags = 0;
2321
2322 return;
2323}
2324
2325/*
2326 * __vxge_hw_fifo_create - Create a FIFO
2327 * This function creates FIFO and initializes it.
2328 */
2329enum vxge_hw_status
2330__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2331 struct vxge_hw_fifo_attr *attr)
2332{
2333 enum vxge_hw_status status = VXGE_HW_OK;
2334 struct __vxge_hw_fifo *fifo;
2335 struct vxge_hw_fifo_config *config;
2336 u32 txdl_size, txdl_per_memblock;
2337 struct vxge_hw_mempool_cbs fifo_mp_callback;
2338 struct __vxge_hw_virtualpath *vpath;
2339
2340 if ((vp == NULL) || (attr == NULL)) {
2341 status = VXGE_HW_ERR_INVALID_HANDLE;
2342 goto exit;
2343 }
2344 vpath = vp->vpath;
2345 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2346
2347 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2348
2349 txdl_per_memblock = config->memblock_size / txdl_size;
2350
2351 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2352 VXGE_HW_CHANNEL_TYPE_FIFO,
2353 config->fifo_blocks * txdl_per_memblock,
2354 attr->per_txdl_space, attr->userdata);
2355
2356 if (fifo == NULL) {
2357 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2358 goto exit;
2359 }
2360
2361 vpath->fifoh = fifo;
2362 fifo->nofl_db = vpath->nofl_db;
2363
2364 fifo->vp_id = vpath->vp_id;
2365 fifo->vp_reg = vpath->vp_reg;
2366 fifo->stats = &vpath->sw_stats->fifo_stats;
2367
2368 fifo->config = config;
2369
2370 /* apply "interrupts per txdl" attribute */
2371 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2372
2373 if (fifo->config->intr)
2374 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2375
2376 fifo->no_snoop_bits = config->no_snoop_bits;
2377
2378 /*
2379 * FIFO memory management strategy:
2380 *
2381 * TxDL split into three independent parts:
2382 * - set of TxD's
2383 * - TxD HW private part
2384 * - driver private part
2385 *
 2386 * Adaptive memory allocation is used, i.e. memory is allocated on
 2387 * demand with a size that will fit into one memory block.
2388 * One memory block may contain more than one TxDL.
2389 *
2390 * During "reserve" operations more memory can be allocated on demand
2391 * for example due to FIFO full condition.
2392 *
 2393 * The pool of memblocks never shrinks except in the __vxge_hw_fifo_close
2394 * routine which will essentially stop the channel and free resources.
2395 */
2396
2397 /* TxDL common private size == TxDL private + driver private */
2398 fifo->priv_size =
2399 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2400 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2401 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2402
2403 fifo->per_txdl_space = attr->per_txdl_space;
2404
2405 /* recompute txdl size to be cacheline aligned */
2406 fifo->txdl_size = txdl_size;
2407 fifo->txdl_per_memblock = txdl_per_memblock;
2408
2409 fifo->txdl_term = attr->txdl_term;
2410 fifo->callback = attr->callback;
2411
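	/* A memblock too small to hold even one TxDL is a configuration
	 * error. */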
2412 if (fifo->txdl_per_memblock == 0) {
2413 __vxge_hw_fifo_delete(vp);
2414 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2415 goto exit;
2416 }
2417
2418 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2419
2420 fifo->mempool =
2421 __vxge_hw_mempool_create(vpath->hldev,
2422 fifo->config->memblock_size,
2423 fifo->txdl_size,
2424 fifo->priv_size,
2425 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2426 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2427 &fifo_mp_callback,
2428 fifo);
2429
2430 if (fifo->mempool == NULL) {
2431 __vxge_hw_fifo_delete(vp);
2432 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2433 goto exit;
2434 }
2435
2436 status = __vxge_hw_channel_initialize(&fifo->channel);
2437 if (status != VXGE_HW_OK) {
2438 __vxge_hw_fifo_delete(vp);
2439 goto exit;
2440 }
2441
2442 vxge_assert(fifo->channel.reserve_ptr);
2443exit:
2444 return status;
2445}
2446
2447/*
 2448 * __vxge_hw_fifo_abort - Return the outstanding TxDs
 2449 * This function terminates the posted TxDs of the fifo
2450 */
2451enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2452{
2453 void *txdlh;
2454
2455 for (;;) {
2456 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2457
2458 if (txdlh == NULL)
2459 break;
2460
2461 vxge_hw_channel_dtr_complete(&fifo->channel);
2462
2463 if (fifo->txdl_term) {
2464 fifo->txdl_term(txdlh,
2465 VXGE_HW_TXDL_STATE_POSTED,
2466 fifo->channel.userdata);
2467 }
2468
2469 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2470 }
2471
2472 return VXGE_HW_OK;
2473}
2474
2475/*
2476 * __vxge_hw_fifo_reset - Resets the fifo
2477 * This function resets the fifo during vpath reset operation
2478 */
2479enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2480{
2481 enum vxge_hw_status status = VXGE_HW_OK;
2482
2483 __vxge_hw_fifo_abort(fifo);
2484 status = __vxge_hw_channel_reset(&fifo->channel);
2485
2486 return status;
2487}
2488
2489/*
2490 * __vxge_hw_fifo_delete - Removes the FIFO
 2491 * This function frees up the memory pool and removes the FIFO
2492 */
2493enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2494{
2495 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2496
2497 __vxge_hw_fifo_abort(fifo);
2498
2499 if (fifo->mempool)
2500 __vxge_hw_mempool_destroy(fifo->mempool);
2501
2502 vp->vpath->fifoh = NULL;
2503
2504 __vxge_hw_channel_free(&fifo->channel);
2505
2506 return VXGE_HW_OK;
2507}
2508
2509/*
 2510 * __vxge_hw_vpath_pci_read - Read the content of the given address
2511 * in pci config space.
2512 * Read from the vpath pci config space.
2513 */
2514enum vxge_hw_status
2515__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2516 u32 phy_func_0, u32 offset, u32 *val)
2517{
2518 u64 val64;
2519 enum vxge_hw_status status = VXGE_HW_OK;
2520 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2521
2522 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2523
2524 if (phy_func_0)
2525 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2526
2527 writeq(val64, &vp_reg->pci_config_access_cfg1);
2528 wmb();
2529 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2530 &vp_reg->pci_config_access_cfg2);
2531 wmb();
2532
2533 status = __vxge_hw_device_register_poll(
2534 &vp_reg->pci_config_access_cfg2,
2535 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2536
2537 if (status != VXGE_HW_OK)
2538 goto exit;
2539
2540 val64 = readq(&vp_reg->pci_config_access_status);
2541
2542 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2543 status = VXGE_HW_FAIL;
2544 *val = 0;
2545 } else
2546 *val = (u32)vxge_bVALn(val64, 32, 32);
2547exit:
2548 return status;
2549}
2550
2551/*
2552 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2553 * Returns the function number of the vpath.
2554 */
2555u32
2556__vxge_hw_vpath_func_id_get(u32 vp_id,
2557 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2558{
2559 u64 val64;
2560
2561 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2562
2563 return
2564 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2565}
2566
2567/*
2568 * __vxge_hw_read_rts_ds - Program RTS steering critieria
2569 */
2570static inline void
2571__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2572 u64 dta_struct_sel)
2573{
2574 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2575 wmb();
2576 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2577 writeq(0, &vpath_reg->rts_access_steer_data1);
2578 wmb();
2579 return;
2580}
2581
2582
2583/*
2584 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2585 * part number and product description.
2586 */
2587enum vxge_hw_status
2588__vxge_hw_vpath_card_info_get(
2589 u32 vp_id,
2590 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2591 struct vxge_hw_device_hw_info *hw_info)
2592{
2593 u32 i, j;
2594 u64 val64;
2595 u64 data1 = 0ULL;
2596 u64 data2 = 0ULL;
2597 enum vxge_hw_status status = VXGE_HW_OK;
2598 u8 *serial_number = hw_info->serial_number;
2599 u8 *part_number = hw_info->part_number;
2600 u8 *product_desc = hw_info->product_desc;
2601
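	/* The serial number, part number and product description are read as
	 * firmware memo entries; each read returns 16 bytes through the
	 * data0/data1 registers in big-endian order. */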
2602 __vxge_hw_read_rts_ds(vpath_reg,
2603 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2604
2605 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2606 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2607 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2608 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2609 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2610 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2611
2612 status = __vxge_hw_pio_mem_write64(val64,
2613 &vpath_reg->rts_access_steer_ctrl,
2614 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2615 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2616
2617 if (status != VXGE_HW_OK)
2618 return status;
2619
2620 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2621
2622 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2623 data1 = readq(&vpath_reg->rts_access_steer_data0);
2624 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2625
2626 data2 = readq(&vpath_reg->rts_access_steer_data1);
2627 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2628 status = VXGE_HW_OK;
2629 } else
2630 *serial_number = 0;
2631
2632 __vxge_hw_read_rts_ds(vpath_reg,
2633 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2634
2635 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2636 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2637 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2638 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2639 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2640 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2641
2642 status = __vxge_hw_pio_mem_write64(val64,
2643 &vpath_reg->rts_access_steer_ctrl,
2644 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2645 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2646
2647 if (status != VXGE_HW_OK)
2648 return status;
2649
2650 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2651
2652 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2653
2654 data1 = readq(&vpath_reg->rts_access_steer_data0);
2655 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2656
2657 data2 = readq(&vpath_reg->rts_access_steer_data1);
2658 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2659
2660 status = VXGE_HW_OK;
2661
2662 } else
2663 *part_number = 0;
2664
2665 j = 0;
2666
2667 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2668 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2669
2670 __vxge_hw_read_rts_ds(vpath_reg, i);
2671
2672 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2673 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2674 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2675 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2676 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2677 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2678
2679 status = __vxge_hw_pio_mem_write64(val64,
2680 &vpath_reg->rts_access_steer_ctrl,
2681 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2682 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2683
2684 if (status != VXGE_HW_OK)
2685 return status;
2686
2687 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2688
2689 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2690
2691 data1 = readq(&vpath_reg->rts_access_steer_data0);
2692 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2693
2694 data2 = readq(&vpath_reg->rts_access_steer_data1);
2695 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2696
2697 status = VXGE_HW_OK;
2698 } else
2699 *product_desc = 0;
2700 }
2701
2702 return status;
2703}
2704
2705/*
2706 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2707 * Returns FW Version
2708 */
2709enum vxge_hw_status
2710__vxge_hw_vpath_fw_ver_get(
2711 u32 vp_id,
2712 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2713 struct vxge_hw_device_hw_info *hw_info)
2714{
2715 u64 val64;
2716 u64 data1 = 0ULL;
2717 u64 data2 = 0ULL;
2718 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2719 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2720 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2721 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2722 enum vxge_hw_status status = VXGE_HW_OK;
2723
2724 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2725 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2726 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2727 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2728 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2729 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2730
2731 status = __vxge_hw_pio_mem_write64(val64,
2732 &vpath_reg->rts_access_steer_ctrl,
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2734 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2735
2736 if (status != VXGE_HW_OK)
2737 goto exit;
2738
2739 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2740
2741 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2742
2743 data1 = readq(&vpath_reg->rts_access_steer_data0);
2744 data2 = readq(&vpath_reg->rts_access_steer_data1);
2745
2746 fw_date->day =
2747 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2748 data1);
2749 fw_date->month =
2750 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2751 data1);
2752 fw_date->year =
2753 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2754 data1);
2755
2756 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2757 fw_date->month, fw_date->day, fw_date->year);
2758
2759 fw_version->major =
2760 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2761 fw_version->minor =
2762 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2763 fw_version->build =
2764 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2765
2766 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2767 fw_version->major, fw_version->minor, fw_version->build);
2768
2769 flash_date->day =
2770 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2771 flash_date->month =
2772 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2773 flash_date->year =
2774 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2775
2776 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2777 "%2.2d/%2.2d/%4.4d",
2778 flash_date->month, flash_date->day, flash_date->year);
2779
2780 flash_version->major =
2781 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2782 flash_version->minor =
2783 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2784 flash_version->build =
2785 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2786
2787 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2788 flash_version->major, flash_version->minor,
2789 flash_version->build);
2790
2791 status = VXGE_HW_OK;
2792
2793 } else
2794 status = VXGE_HW_FAIL;
2795exit:
2796 return status;
2797}
2798
2799/*
2800 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2801 * Returns pci function mode
2802 */
2803u64
2804__vxge_hw_vpath_pci_func_mode_get(
2805 u32 vp_id,
2806 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2807{
2808 u64 val64;
2809 u64 data1 = 0ULL;
2810 enum vxge_hw_status status = VXGE_HW_OK;
2811
2812 __vxge_hw_read_rts_ds(vpath_reg,
2813 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2814
2815 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2816 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2817 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2818 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2819 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2820 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2821
2822 status = __vxge_hw_pio_mem_write64(val64,
2823 &vpath_reg->rts_access_steer_ctrl,
2824 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2825 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2826
2827 if (status != VXGE_HW_OK)
2828 goto exit;
2829
2830 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2831
2832 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2833 data1 = readq(&vpath_reg->rts_access_steer_data0);
2834 status = VXGE_HW_OK;
2835 } else {
2836 data1 = 0;
2837 status = VXGE_HW_FAIL;
2838 }
2839exit:
2840 return data1;
2841}
2842
2843/**
2844 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2845 * @hldev: HW device.
2846 * @on_off: TRUE if flickering to be on, FALSE to be off
2847 *
2848 * Flicker the link LED.
2849 */
2850enum vxge_hw_status
2851vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2852 u64 on_off)
2853{
2854 u64 val64;
2855 enum vxge_hw_status status = VXGE_HW_OK;
2856 struct vxge_hw_vpath_reg __iomem *vp_reg;
2857
2858 if (hldev == NULL) {
2859 status = VXGE_HW_ERR_INVALID_DEVICE;
2860 goto exit;
2861 }
2862
2863 vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2864
2865 writeq(0, &vp_reg->rts_access_steer_ctrl);
2866 wmb();
2867 writeq(on_off, &vp_reg->rts_access_steer_data0);
2868 writeq(0, &vp_reg->rts_access_steer_data1);
2869 wmb();
2870
2871 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2872 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2873 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2874 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2875 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2876 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2877
2878 status = __vxge_hw_pio_mem_write64(val64,
2879 &vp_reg->rts_access_steer_ctrl,
2880 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2881 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2882exit:
2883 return status;
2884}
2885
2886/*
2887 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
2888 */
2889enum vxge_hw_status
2890__vxge_hw_vpath_rts_table_get(
2891 struct __vxge_hw_vpath_handle *vp,
2892 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
2893{
2894 u64 val64;
2895 struct __vxge_hw_virtualpath *vpath;
2896 struct vxge_hw_vpath_reg __iomem *vp_reg;
2897
2898 enum vxge_hw_status status = VXGE_HW_OK;
2899
2900 if (vp == NULL) {
2901 status = VXGE_HW_ERR_INVALID_HANDLE;
2902 goto exit;
2903 }
2904
2905 vpath = vp->vpath;
2906 vp_reg = vpath->vp_reg;
2907
2908 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2909 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2910 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2911 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2912
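	/* The RTH-related tables are addressed with the TABLE_SEL bit set in
	 * the steering control word. */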
2913 if ((rts_table ==
2914 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
2915 (rts_table ==
2916 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
2917 (rts_table ==
2918 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
2919 (rts_table ==
2920 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
2921 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
2922 }
2923
2924 status = __vxge_hw_pio_mem_write64(val64,
2925 &vp_reg->rts_access_steer_ctrl,
2926 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2927 vpath->hldev->config.device_poll_millis);
2928
2929 if (status != VXGE_HW_OK)
2930 goto exit;
2931
2932 val64 = readq(&vp_reg->rts_access_steer_ctrl);
2933
2934 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2935
2936 *data1 = readq(&vp_reg->rts_access_steer_data0);
2937
2938 if ((rts_table ==
2939 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2940 (rts_table ==
2941 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2942 *data2 = readq(&vp_reg->rts_access_steer_data1);
2943 }
2944 status = VXGE_HW_OK;
2945 } else
2946 status = VXGE_HW_FAIL;
2947exit:
2948 return status;
2949}
2950
2951/*
2952 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
2953 */
2954enum vxge_hw_status
2955__vxge_hw_vpath_rts_table_set(
2956 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
2957 u32 offset, u64 data1, u64 data2)
2958{
2959 u64 val64;
2960 struct __vxge_hw_virtualpath *vpath;
2961 enum vxge_hw_status status = VXGE_HW_OK;
2962 struct vxge_hw_vpath_reg __iomem *vp_reg;
2963
2964 if (vp == NULL) {
2965 status = VXGE_HW_ERR_INVALID_HANDLE;
2966 goto exit;
2967 }
2968
2969 vpath = vp->vpath;
2970 vp_reg = vpath->vp_reg;
2971
2972 writeq(data1, &vp_reg->rts_access_steer_data0);
2973 wmb();
2974
2975 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2976 (rts_table ==
2977 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2978 writeq(data2, &vp_reg->rts_access_steer_data1);
2979 wmb();
2980 }
2981
2982 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2983 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2984 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2985 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2986
2987 status = __vxge_hw_pio_mem_write64(val64,
2988 &vp_reg->rts_access_steer_ctrl,
2989 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2990 vpath->hldev->config.device_poll_millis);
2991
2992 if (status != VXGE_HW_OK)
2993 goto exit;
2994
2995 val64 = readq(&vp_reg->rts_access_steer_ctrl);
2996
2997 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
2998 status = VXGE_HW_OK;
2999 else
3000 status = VXGE_HW_FAIL;
3001exit:
3002 return status;
3003}
3004
3005/*
3006 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3007 * from MAC address table.
3008 */
3009enum vxge_hw_status
3010__vxge_hw_vpath_addr_get(
3011 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3012 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3013{
3014 u32 i;
3015 u64 val64;
3016 u64 data1 = 0ULL;
3017 u64 data2 = 0ULL;
3018 enum vxge_hw_status status = VXGE_HW_OK;
3019
3020 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3021 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3022 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3023 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3024 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3025 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3026
3027 status = __vxge_hw_pio_mem_write64(val64,
3028 &vpath_reg->rts_access_steer_ctrl,
3029 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3030 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3031
3032 if (status != VXGE_HW_OK)
3033 goto exit;
3034
3035 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3036
3037 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3038
3039 data1 = readq(&vpath_reg->rts_access_steer_data0);
3040 data2 = readq(&vpath_reg->rts_access_steer_data1);
3041
3042 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3043 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3044 data2);
3045
3046 for (i = ETH_ALEN; i > 0; i--) {
3047 macaddr[i-1] = (u8)(data1 & 0xFF);
3048 data1 >>= 8;
3049
3050 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3051 data2 >>= 8;
3052 }
3053 status = VXGE_HW_OK;
3054 } else
3055 status = VXGE_HW_FAIL;
3056exit:
3057 return status;
3058}
3059
3060/*
3061 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
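 *
 * Illustrative use (hypothetical caller, not taken from this file):
 *
 *	struct vxge_hw_rth_hash_types hash = { .hash_type_tcpipv4_en = 1 };
 *
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 6);
 *
 * where "vp" is an open vpath handle, RTH_ALG_JENKINS is assumed to be one
 * of the enum vxge_hw_rth_algoritms values, and 6 requests a 2^6-entry
 * bucket table.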
3062 */
3063enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3064 struct __vxge_hw_vpath_handle *vp,
3065 enum vxge_hw_rth_algoritms algorithm,
3066 struct vxge_hw_rth_hash_types *hash_type,
3067 u16 bucket_size)
3068{
3069 u64 data0, data1;
3070 enum vxge_hw_status status = VXGE_HW_OK;
3071
3072 if (vp == NULL) {
3073 status = VXGE_HW_ERR_INVALID_HANDLE;
3074 goto exit;
3075 }
3076
3077 status = __vxge_hw_vpath_rts_table_get(vp,
3078 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3079 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3080 0, &data0, &data1);
3081
3082 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3083 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3084
3085 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3086 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3087 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3088
3089 if (hash_type->hash_type_tcpipv4_en)
3090 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3091
3092 if (hash_type->hash_type_ipv4_en)
3093 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3094
3095 if (hash_type->hash_type_tcpipv6_en)
3096 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3097
3098 if (hash_type->hash_type_ipv6_en)
3099 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3100
3101 if (hash_type->hash_type_tcpipv6ex_en)
3102 data0 |=
3103 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3104
3105 if (hash_type->hash_type_ipv6ex_en)
3106 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3107
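	/* Flip the active-table indication; the generation config appears to
	 * be double buffered, so the newly written values take effect on the
	 * table that becomes active. */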
3108 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3109 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3110 else
3111 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3112
3113 status = __vxge_hw_vpath_rts_table_set(vp,
3114 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3116 0, data0, 0);
3117exit:
3118 return status;
3119}
3120
3121static void
3122vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3123 u16 flag, u8 *itable)
3124{
3125 switch (flag) {
3126 case 1:
3127 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3128 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3129 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3130 itable[j]);
		break;
 3131	case 2:
3132 *data0 |=
3133 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3134 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3135 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3136 itable[j]);
		break;
 3137	case 3:
3138 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3139 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3140 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3141 itable[j]);
		break;
 3142	case 4:
3143 *data1 |=
3144 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3145 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3146 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3147 itable[j]);
		break;
 3148	default:
3149 return;
3150 }
3151}
3152/*
3153 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3154 */
3155enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3156 struct __vxge_hw_vpath_handle **vpath_handles,
3157 u32 vpath_count,
3158 u8 *mtable,
3159 u8 *itable,
3160 u32 itable_size)
3161{
3162 u32 i, j, action, rts_table;
3163 u64 data0;
3164 u64 data1;
3165 u32 max_entries;
3166 enum vxge_hw_status status = VXGE_HW_OK;
3167 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3168
3169 if (vp == NULL) {
3170 status = VXGE_HW_ERR_INVALID_HANDLE;
3171 goto exit;
3172 }
3173
3174 max_entries = (((u32)1) << itable_size);
3175
3176 if (vp->vpath->hldev->config.rth_it_type
3177 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3178 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3179 rts_table =
3180 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3181
3182 for (j = 0; j < max_entries; j++) {
3183
3184 data1 = 0;
3185
3186 data0 =
3187 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3188 itable[j]);
3189
3190 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3191 action, rts_table, j, data0, data1);
3192
3193 if (status != VXGE_HW_OK)
3194 goto exit;
3195 }
3196
3197 for (j = 0; j < max_entries; j++) {
3198
3199 data1 = 0;
3200
3201 data0 =
3202 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3203 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3204 itable[j]);
3205
3206 status = __vxge_hw_vpath_rts_table_set(
3207 vpath_handles[mtable[itable[j]]], action,
3208 rts_table, j, data0, data1);
3209
3210 if (status != VXGE_HW_OK)
3211 goto exit;
3212 }
3213 } else {
3214 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3215 rts_table =
3216 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3217 for (i = 0; i < vpath_count; i++) {
3218
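		/* Pack up to four indirection-table entries that map to vpath i
		 * into one data0/data1 pair, then issue a single write below. */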
3219 for (j = 0; j < max_entries;) {
3220
3221 data0 = 0;
3222 data1 = 0;
3223
3224 while (j < max_entries) {
3225 if (mtable[itable[j]] != i) {
3226 j++;
3227 continue;
3228 }
3229 vxge_hw_rts_rth_data0_data1_get(j,
3230 &data0, &data1, 1, itable);
3231 j++;
3232 break;
3233 }
3234
3235 while (j < max_entries) {
3236 if (mtable[itable[j]] != i) {
3237 j++;
3238 continue;
3239 }
3240 vxge_hw_rts_rth_data0_data1_get(j,
3241 &data0, &data1, 2, itable);
3242 j++;
3243 break;
3244 }
3245
3246 while (j < max_entries) {
3247 if (mtable[itable[j]] != i) {
3248 j++;
3249 continue;
3250 }
3251 vxge_hw_rts_rth_data0_data1_get(j,
3252 &data0, &data1, 3, itable);
3253 j++;
3254 break;
3255 }
3256
3257 while (j < max_entries) {
3258 if (mtable[itable[j]] != i) {
3259 j++;
3260 continue;
3261 }
3262 vxge_hw_rts_rth_data0_data1_get(j,
3263 &data0, &data1, 4, itable);
3264 j++;
3265 break;
3266 }
3267
3268 if (data0 != 0) {
3269 status = __vxge_hw_vpath_rts_table_set(
3270 vpath_handles[i],
3271 action, rts_table,
3272 0, data0, data1);
3273
3274 if (status != VXGE_HW_OK)
3275 goto exit;
3276 }
3277 }
3278 }
3279 }
3280exit:
3281 return status;
3282}
3283
3284/**
3285 * vxge_hw_vpath_check_leak - Check for memory leak
 3286 * @ring: Handle to the ring object used for receive
 3287 *
 3288 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
 3289 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3290 * Returns: VXGE_HW_FAIL, if leak has occurred.
3291 *
3292 */
3293enum vxge_hw_status
3294vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3295{
3296 enum vxge_hw_status status = VXGE_HW_OK;
3297 u64 rxd_new_count, rxd_spat;
3298
3299 if (ring == NULL)
3300 return status;
3301
3302 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3303 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3304 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3305
3306 if (rxd_new_count >= rxd_spat)
3307 status = VXGE_HW_FAIL;
3308
3309 return status;
3310}
3311
3312/*
3313 * __vxge_hw_vpath_mgmt_read
3314 * This routine reads the vpath_mgmt registers
3315 */
3316static enum vxge_hw_status
3317__vxge_hw_vpath_mgmt_read(
3318 struct __vxge_hw_device *hldev,
3319 struct __vxge_hw_virtualpath *vpath)
3320{
3321 u32 i, mtu = 0, max_pyld = 0;
3322 u64 val64;
3323 enum vxge_hw_status status = VXGE_HW_OK;
3324
3325 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3326
3327 val64 = readq(&vpath->vpmgmt_reg->
3328 rxmac_cfg0_port_vpmgmt_clone[i]);
3329 max_pyld =
3330 (u32)
3331 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3332 (val64);
3333 if (mtu < max_pyld)
3334 mtu = max_pyld;
3335 }
3336
3337 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3338
3339 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3340
3341 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3342 if (val64 & vxge_mBIT(i))
3343 vpath->vsport_number = i;
3344 }
3345
3346 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3347
3348 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3349 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3350 else
3351 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3352
3353 return status;
3354}
3355
3356/*
3357 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3358 * This routine checks the vpath_rst_in_prog register to see if
 3359 * the adapter has completed the reset process for the vpath
3360 */
3361enum vxge_hw_status
3362__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3363{
3364 enum vxge_hw_status status;
3365
3366 status = __vxge_hw_device_register_poll(
3367 &vpath->hldev->common_reg->vpath_rst_in_prog,
3368 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3369 1 << (16 - vpath->vp_id)),
3370 vpath->hldev->config.device_poll_millis);
3371
3372 return status;
3373}
3374
3375/*
3376 * __vxge_hw_vpath_reset
3377 * This routine resets the vpath on the device
3378 */
3379enum vxge_hw_status
3380__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3381{
3382 u64 val64;
3383 enum vxge_hw_status status = VXGE_HW_OK;
3384
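	/* Kick off the vpath reset here; completion is polled separately in
	 * __vxge_hw_vpath_reset_check(). */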
3385 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3386
3387 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3388 &hldev->common_reg->cmn_rsthdlr_cfg0);
3389
3390 return status;
3391}
3392
3393/*
3394 * __vxge_hw_vpath_sw_reset
3395 * This routine resets the vpath structures
3396 */
3397enum vxge_hw_status
3398__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3399{
3400 enum vxge_hw_status status = VXGE_HW_OK;
3401 struct __vxge_hw_virtualpath *vpath;
3402
3403 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3404
3405 if (vpath->ringh) {
3406 status = __vxge_hw_ring_reset(vpath->ringh);
3407 if (status != VXGE_HW_OK)
3408 goto exit;
3409 }
3410
3411 if (vpath->fifoh)
3412 status = __vxge_hw_fifo_reset(vpath->fifoh);
3413exit:
3414 return status;
3415}
3416
3417/*
3418 * __vxge_hw_vpath_prc_configure
3419 * This routine configures the prc registers of virtual path using the config
3420 * passed
3421 */
3422void
3423__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3424{
3425 u64 val64;
3426 struct __vxge_hw_virtualpath *vpath;
3427 struct vxge_hw_vp_config *vp_config;
3428 struct vxge_hw_vpath_reg __iomem *vp_reg;
3429
3430 vpath = &hldev->virtual_paths[vp_id];
3431 vp_reg = vpath->vp_reg;
3432 vp_config = vpath->vp_config;
3433
3434 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3435 return;
3436
3437 val64 = readq(&vp_reg->prc_cfg1);
3438 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3439 writeq(val64, &vp_reg->prc_cfg1);
3440
3441 val64 = readq(&vpath->vp_reg->prc_cfg6);
3442 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3443 writeq(val64, &vpath->vp_reg->prc_cfg6);
3444
3445 val64 = readq(&vp_reg->prc_cfg7);
3446
3447 if (vpath->vp_config->ring.scatter_mode !=
3448 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3449
3450 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3451
3452 switch (vpath->vp_config->ring.scatter_mode) {
3453 case VXGE_HW_RING_SCATTER_MODE_A:
3454 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3455 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3456 break;
3457 case VXGE_HW_RING_SCATTER_MODE_B:
3458 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3459 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3460 break;
3461 case VXGE_HW_RING_SCATTER_MODE_C:
3462 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3463 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3464 break;
3465 }
3466 }
3467
3468 writeq(val64, &vp_reg->prc_cfg7);
3469
3470 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3471 __vxge_hw_ring_first_block_address_get(
3472 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3473
3474 val64 = readq(&vp_reg->prc_cfg4);
3475 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3476 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3477
3478 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3479 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3480
3481 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3482 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3483 else
3484 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3485
3486 writeq(val64, &vp_reg->prc_cfg4);
3487 return;
3488}
3489
3490/*
3491 * __vxge_hw_vpath_kdfc_configure
3492 * This routine configures the kdfc registers of virtual path using the
3493 * config passed
3494 */
3495enum vxge_hw_status
3496__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3497{
3498 u64 val64;
3499 u64 vpath_stride;
3500 enum vxge_hw_status status = VXGE_HW_OK;
3501 struct __vxge_hw_virtualpath *vpath;
3502 struct vxge_hw_vpath_reg __iomem *vp_reg;
3503
3504 vpath = &hldev->virtual_paths[vp_id];
3505 vp_reg = vpath->vp_reg;
3506 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3507
3508 if (status != VXGE_HW_OK)
3509 goto exit;
3510
3511 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3512
3513 vpath->max_kdfc_db =
3514 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3515 val64+1)/2;
3516
3517 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3518
3519 vpath->max_nofl_db = vpath->max_kdfc_db;
3520
3521 if (vpath->max_nofl_db <
3522 ((vpath->vp_config->fifo.memblock_size /
3523 (vpath->vp_config->fifo.max_frags *
3524 sizeof(struct vxge_hw_fifo_txd))) *
3525 vpath->vp_config->fifo.fifo_blocks)) {
3526
3527 return VXGE_HW_BADCFG_FIFO_BLOCKS;
3528 }
3529 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3530 (vpath->max_nofl_db*2)-1);
3531 }
3532
3533 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3534
3535 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3536 &vp_reg->kdfc_fifo_trpl_ctrl);
3537
3538 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3539
3540 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3541 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3542
3543 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3544 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3545#ifndef __BIG_ENDIAN
3546 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3547#endif
3548 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3549
3550 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3551 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3552 wmb();
3553 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3554
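	/* Locate this vpath's non-offload doorbell inside the KDFC area
	 * using the per-vpath stride published in the TOC registers. */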
3555 vpath->nofl_db =
3556 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3557 (hldev->kdfc + (vp_id *
3558 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3559 vpath_stride)));
3560exit:
3561 return status;
3562}
3563
3564/*
3565 * __vxge_hw_vpath_mac_configure
3566 * This routine configures the mac of virtual path using the config passed
3567 */
3568enum vxge_hw_status
3569__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3570{
3571 u64 val64;
3572 enum vxge_hw_status status = VXGE_HW_OK;
3573 struct __vxge_hw_virtualpath *vpath;
3574 struct vxge_hw_vp_config *vp_config;
3575 struct vxge_hw_vpath_reg __iomem *vp_reg;
3576
3577 vpath = &hldev->virtual_paths[vp_id];
3578 vp_reg = vpath->vp_reg;
3579 vp_config = vpath->vp_config;
3580
3581 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3582 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3583
3584 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3585
3586 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3587
3588 if (vp_config->rpa_strip_vlan_tag !=
3589 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3590 if (vp_config->rpa_strip_vlan_tag)
3591 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3592 else
3593 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3594 }
3595
3596 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3597 val64 = readq(&vp_reg->rxmac_vcfg0);
3598
3599 if (vp_config->mtu !=
3600 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3601 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3602 if ((vp_config->mtu +
3603 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3604 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3605 vp_config->mtu +
3606 VXGE_HW_MAC_HEADER_MAX_SIZE);
3607 else
3608 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3609 vpath->max_mtu);
3610 }
3611
3612 writeq(val64, &vp_reg->rxmac_vcfg0);
3613
3614 val64 = readq(&vp_reg->rxmac_vcfg1);
3615
3616 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3617 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3618
3619 if (hldev->config.rth_it_type ==
3620 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3621 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3622 0x2) |
3623 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3624 }
3625
3626 writeq(val64, &vp_reg->rxmac_vcfg1);
3627 }
3628 return status;
3629}
3630
3631/*
3632 * __vxge_hw_vpath_tim_configure
3633 * This routine configures the tim registers of virtual path using the config
3634 * passed
3635 */
3636enum vxge_hw_status
3637__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3638{
3639 u64 val64;
3640 enum vxge_hw_status status = VXGE_HW_OK;
3641 struct __vxge_hw_virtualpath *vpath;
3642 struct vxge_hw_vpath_reg __iomem *vp_reg;
3643 struct vxge_hw_vp_config *config;
3644
3645 vpath = &hldev->virtual_paths[vp_id];
3646 vp_reg = vpath->vp_reg;
3647 config = vpath->vp_config;
3648
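	/* Program the transmit (TTI) and receive (RTI) interrupt moderation
	 * timers below; fields left at VXGE_HW_USE_FLASH_DEFAULT keep their
	 * firmware-programmed values. */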
3649 writeq((u64)0, &vp_reg->tim_dest_addr);
3650 writeq((u64)0, &vp_reg->tim_vpath_map);
3651 writeq((u64)0, &vp_reg->tim_bitmap);
3652 writeq((u64)0, &vp_reg->tim_remap);
3653
3654 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3655 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3656 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3657 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3658
3659 val64 = readq(&vp_reg->tim_pci_cfg);
3660 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3661 writeq(val64, &vp_reg->tim_pci_cfg);
3662
3663 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3664
3665 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3666
3667 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3668 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3669 0x3ffffff);
3670 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3671 config->tti.btimer_val);
3672 }
3673
3674 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3675
3676 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3677 if (config->tti.timer_ac_en)
3678 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3679 else
3680 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3681 }
3682
3683 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3684 if (config->tti.timer_ci_en)
3685 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3686 else
3687 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3688 }
3689
3690 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3691 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3692 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3693 config->tti.urange_a);
3694 }
3695
3696 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3697 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3698 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3699 config->tti.urange_b);
3700 }
3701
3702 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3703 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3704 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3705 config->tti.urange_c);
3706 }
3707
3708 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3709 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3710
3711 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3712 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3713 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3714 config->tti.uec_a);
3715 }
3716
3717 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3718 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3719 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3720 config->tti.uec_b);
3721 }
3722
3723 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3724 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3725 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3726 config->tti.uec_c);
3727 }
3728
3729 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3730 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3731 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3732 config->tti.uec_d);
3733 }
3734
3735 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3736 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3737
3738 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3739 if (config->tti.timer_ri_en)
3740 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3741 else
3742 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3743 }
3744
3745 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3746 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3747 0x3ffffff);
3748 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3749 config->tti.rtimer_val);
3750 }
3751
3752 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3753 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3754 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3755 config->tti.util_sel);
3756 }
3757
3758 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3759 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3760 0x3ffffff);
3761 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3762 config->tti.ltimer_val);
3763 }
3764
3765 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3766 }
3767
3768 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3769
3770 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3771
3772 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3773 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3774 0x3ffffff);
3775 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3776 config->rti.btimer_val);
3777 }
3778
3779 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3780
3781 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3782 if (config->rti.timer_ac_en)
3783 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3784 else
3785 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3786 }
3787
3788 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3789 if (config->rti.timer_ci_en)
3790 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3791 else
3792 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3793 }
3794
3795 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3796 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3797 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3798 config->rti.urange_a);
3799 }
3800
3801 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3802 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3803 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3804 config->rti.urange_b);
3805 }
3806
3807 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3808 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3809 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3810 config->rti.urange_c);
3811 }
3812
3813 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3814 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3815
3816 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3817 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3818 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3819 config->rti.uec_a);
3820 }
3821
3822 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3823 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3824 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3825 config->rti.uec_b);
3826 }
3827
3828 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3829 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3830 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3831 config->rti.uec_c);
3832 }
3833
3834 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3835 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3836 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3837 config->rti.uec_d);
3838 }
3839
3840 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3841 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3842
3843 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3844 if (config->rti.timer_ri_en)
3845 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3846 else
3847 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3848 }
3849
3850 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3851 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3852 0x3ffffff);
3853 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3854 config->rti.rtimer_val);
3855 }
3856
3857 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3858 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3859 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3860 config->rti.util_sel);
3861 }
3862
3863 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3864 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3865 0x3ffffff);
3866 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3867 config->rti.ltimer_val);
3868 }
3869
3870 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3871 }
3872
3873 val64 = 0;
3874 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3875 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3876 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3877 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3878 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3879 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3880
3881 return status;
3882}
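/*
 * Note on the read-modify-write pattern above: any tti/rti field left at
 * VXGE_HW_USE_FLASH_DEFAULT keeps whatever the adapter loaded from flash,
 * which is why each field is merged into tim_cfg1/2/3_int_num rather than
 * the registers being overwritten wholesale.  A minimal, illustrative
 * sketch of overriding just the RX batch timer (here "config" stands for
 * the vpath's vp_config, with all other interrupt fields preset to
 * VXGE_HW_USE_FLASH_DEFAULT):
 *
 *     config->rti.btimer_val = 250;
 *     status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
 */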
3883
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00003884void
3885vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3886{
3887 struct __vxge_hw_virtualpath *vpath;
3888 struct vxge_hw_vpath_reg __iomem *vp_reg;
3889 struct vxge_hw_vp_config *config;
3890 u64 val64;
3891
3892 vpath = &hldev->virtual_paths[vp_id];
3893 vp_reg = vpath->vp_reg;
3894 config = vpath->vp_config;
3895
3896 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3897 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3898
3899 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3900 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3901 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3902 writeq(val64,
3903 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3904 }
3905 }
3906 return;
3907}
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003908/*
3909 * __vxge_hw_vpath_initialize
3910 * This routine is the final phase of init which initializes the
3911 * registers of the vpath using the configuration passed.
3912 */
3913enum vxge_hw_status
3914__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3915{
3916 u64 val64;
3917 u32 val32;
3918 enum vxge_hw_status status = VXGE_HW_OK;
3919 struct __vxge_hw_virtualpath *vpath;
3920 struct vxge_hw_vpath_reg __iomem *vp_reg;
3921
3922 vpath = &hldev->virtual_paths[vp_id];
3923
3924 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3925 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3926 goto exit;
3927 }
3928 vp_reg = vpath->vp_reg;
3929
3930 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
3931
3932 if (status != VXGE_HW_OK)
3933 goto exit;
3934
3935 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
3936
3937 if (status != VXGE_HW_OK)
3938 goto exit;
3939
3940 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
3941
3942 if (status != VXGE_HW_OK)
3943 goto exit;
3944
3945 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
3946
3947 if (status != VXGE_HW_OK)
3948 goto exit;
3949
Ramkrishna Vepa40a3a912009-04-01 18:14:40 +00003950 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
3951
3952 /* Get MRRS value from device control */
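	/* Offset 0x78 presumably points at the Device Control register of
	 * the PCI Express capability; bits 14:12 of that register encode
	 * Max_Read_Request_Size (128 bytes << encoding), hence the shift by
	 * 12 below before the value is reused as the fill threshold.
	 */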
3953 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
3954
3955 if (status == VXGE_HW_OK) {
3956 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
3957 val64 &=
3958 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
3959 val64 |=
3960 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
3961
3962 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
3963 }
3964
3965 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
3966 val64 |=
3967 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
3968 VXGE_HW_MAX_PAYLOAD_SIZE_512);
3969
3970 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
3971 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
3972
3973exit:
3974 return status;
3975}
3976
3977/*
3978 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
3979 * This routine is the initial phase of init which resets the vpath and
3980 * initializes the software support structures.
3981 */
3982enum vxge_hw_status
3983__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3984 struct vxge_hw_vp_config *config)
3985{
3986 struct __vxge_hw_virtualpath *vpath;
3987 enum vxge_hw_status status = VXGE_HW_OK;
3988
3989 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3990 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3991 goto exit;
3992 }
3993
3994 vpath = &hldev->virtual_paths[vp_id];
3995
3996 vpath->vp_id = vp_id;
3997 vpath->vp_open = VXGE_HW_VP_OPEN;
3998 vpath->hldev = hldev;
3999 vpath->vp_config = config;
4000 vpath->vp_reg = hldev->vpath_reg[vp_id];
4001 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4002
4003 __vxge_hw_vpath_reset(hldev, vp_id);
4004
4005 status = __vxge_hw_vpath_reset_check(vpath);
4006
4007 if (status != VXGE_HW_OK) {
4008 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4009 goto exit;
4010 }
4011
4012 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4013
4014 if (status != VXGE_HW_OK) {
4015 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4016 goto exit;
4017 }
4018
4019 INIT_LIST_HEAD(&vpath->vpath_handles);
4020
4021 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4022
4023 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4024 hldev->tim_int_mask1, vp_id);
4025
4026 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4027
4028 if (status != VXGE_HW_OK)
4029 __vxge_hw_vp_terminate(hldev, vp_id);
4030exit:
4031 return status;
4032}
4033
4034/*
4035 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 4036 * This routine closes all channels it opened and frees up memory
4037 */
4038void
4039__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4040{
4041 struct __vxge_hw_virtualpath *vpath;
4042
4043 vpath = &hldev->virtual_paths[vp_id];
4044
4045 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4046 goto exit;
4047
4048 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4049 vpath->hldev->tim_int_mask1, vpath->vp_id);
4050 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4051
4052 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4053exit:
4054 return;
4055}
4056
4057/*
4058 * vxge_hw_vpath_mtu_set - Set MTU.
 4059 * Set new MTU value. For example, to use jumbo frames:
 4060 * vxge_hw_vpath_mtu_set(vpath_handle, 9600);
4061 */
4062enum vxge_hw_status
4063vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4064{
4065 u64 val64;
4066 enum vxge_hw_status status = VXGE_HW_OK;
4067 struct __vxge_hw_virtualpath *vpath;
4068
4069 if (vp == NULL) {
4070 status = VXGE_HW_ERR_INVALID_HANDLE;
4071 goto exit;
4072 }
4073 vpath = vp->vpath;
4074
4075 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4076
 4077 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
 4078 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
 goto exit;
 }
4079
4080 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4081
4082 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4083 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4084
4085 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4086
4087 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4088
4089exit:
4090 return status;
4091}
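/*
 * Illustrative call sequence for the MTU helper above; "vp" is assumed to
 * be a handle returned by a successful vxge_hw_vpath_open():
 *
 *     status = vxge_hw_vpath_mtu_set(vp, 9000);
 *     if (status != VXGE_HW_OK)
 *             goto handle_error;
 */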
4092
4093/*
4094 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 4095 * This function is used to open access to a virtual path of an
 4096 * adapter for offload and GRO operations. The function returns
 4097 * synchronously.
4098 */
4099enum vxge_hw_status
4100vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4101 struct vxge_hw_vpath_attr *attr,
4102 struct __vxge_hw_vpath_handle **vpath_handle)
4103{
4104 struct __vxge_hw_virtualpath *vpath;
4105 struct __vxge_hw_vpath_handle *vp;
4106 enum vxge_hw_status status;
4107
4108 vpath = &hldev->virtual_paths[attr->vp_id];
4109
4110 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4111 status = VXGE_HW_ERR_INVALID_STATE;
4112 goto vpath_open_exit1;
4113 }
4114
4115 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4116 &hldev->config.vp_config[attr->vp_id]);
4117
4118 if (status != VXGE_HW_OK)
4119 goto vpath_open_exit1;
4120
4121 vp = (struct __vxge_hw_vpath_handle *)
4122 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4123 if (vp == NULL) {
4124 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4125 goto vpath_open_exit2;
4126 }
4127
4128 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4129
4130 vp->vpath = vpath;
4131
4132 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4133 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4134 if (status != VXGE_HW_OK)
4135 goto vpath_open_exit6;
4136 }
4137
4138 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4139 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4140 if (status != VXGE_HW_OK)
4141 goto vpath_open_exit7;
4142
4143 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4144 }
4145
4146 vpath->fifoh->tx_intr_num =
4147 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4148 VXGE_HW_VPATH_INTR_TX;
4149
4150 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4151 VXGE_HW_BLOCK_SIZE);
4152
4153 if (vpath->stats_block == NULL) {
4154 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4155 goto vpath_open_exit8;
4156 }
4157
4158 vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4159 stats_block->memblock;
4160 memset(vpath->hw_stats, 0,
4161 sizeof(struct vxge_hw_vpath_stats_hw_info));
4162
4163 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4164 vpath->hw_stats;
4165
4166 vpath->hw_stats_sav =
4167 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4168 memset(vpath->hw_stats_sav, 0,
4169 sizeof(struct vxge_hw_vpath_stats_hw_info));
4170
4171 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4172
4173 status = vxge_hw_vpath_stats_enable(vp);
4174 if (status != VXGE_HW_OK)
4175 goto vpath_open_exit8;
4176
4177 list_add(&vp->item, &vpath->vpath_handles);
4178
4179 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4180
4181 *vpath_handle = vp;
4182
4183 attr->fifo_attr.userdata = vpath->fifoh;
4184 attr->ring_attr.userdata = vpath->ringh;
4185
4186 return VXGE_HW_OK;
4187
4188vpath_open_exit8:
4189 if (vpath->ringh != NULL)
4190 __vxge_hw_ring_delete(vp);
4191vpath_open_exit7:
4192 if (vpath->fifoh != NULL)
4193 __vxge_hw_fifo_delete(vp);
4194vpath_open_exit6:
4195 vfree(vp);
4196vpath_open_exit2:
4197 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4198vpath_open_exit1:
4199
4200 return status;
4201}
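/*
 * Illustrative open/close pairing for the entry points in this file.  The
 * attribute block is assumed to be prepared by the caller (vp_id chosen
 * from hldev->vpath_assignments, fifo_attr/ring_attr callbacks filled in
 * elsewhere):
 *
 *     struct vxge_hw_vpath_attr attr;
 *     struct __vxge_hw_vpath_handle *vp;
 *
 *     memset(&attr, 0, sizeof(attr));
 *     attr.vp_id = vp_id;
 *     (fill attr.fifo_attr and attr.ring_attr here)
 *     status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *     if (status != VXGE_HW_OK)
 *             return status;
 *     ...
 *     status = vxge_hw_vpath_close(vp);
 */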
4202
4203/**
 4204 * vxge_hw_vpath_rx_doorbell_init - Initialize the RxD doorbell for a vpath
 4205 * @vp: Handle obtained from a previous vpath open
 4206 *
 4207 * This function posts the initial RxD doorbell based on the on-chip RxD
 4208 * memory size reported by the adapter and caps the ring's rxds_limit
 4209 * accordingly.
4210 */
4211void
4212vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4213{
4214 struct __vxge_hw_virtualpath *vpath = NULL;
4215 u64 new_count, val64, val164;
4216 struct __vxge_hw_ring *ring;
4217
4218 vpath = vp->vpath;
4219 ring = vpath->ringh;
4220
4221 new_count = readq(&vpath->vp_reg->rxdmem_size);
4222 new_count &= 0x1fff;
4223 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4224
4225 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4226 &vpath->vp_reg->prc_rxd_doorbell);
4227 readl(&vpath->vp_reg->prc_rxd_doorbell);
4228
4229 val164 /= 2;
4230 val64 = readq(&vpath->vp_reg->prc_cfg6);
4231 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4232 val64 &= 0x1ff;
4233
4234 /*
4235 * Each RxD is of 4 qwords
4236 */
4237 new_count -= (val64 + 1);
4238 val64 = min(val164, new_count) / 4;
4239
4240 ring->rxds_limit = min(ring->rxds_limit, val64);
4241 if (ring->rxds_limit < 4)
4242 ring->rxds_limit = 4;
4243}
4244
4245/*
 4246 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4247 * This function is used to close access to virtual path opened
4248 * earlier.
4249 */
4250enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4251{
4252 struct __vxge_hw_virtualpath *vpath = NULL;
4253 struct __vxge_hw_device *devh = NULL;
4254 u32 vp_id = vp->vpath->vp_id;
4255 u32 is_empty = TRUE;
4256 enum vxge_hw_status status = VXGE_HW_OK;
4257
4258 vpath = vp->vpath;
4259 devh = vpath->hldev;
4260
4261 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4262 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4263 goto vpath_close_exit;
4264 }
4265
4266 list_del(&vp->item);
4267
4268 if (!list_empty(&vpath->vpath_handles)) {
4269 list_add(&vp->item, &vpath->vpath_handles);
4270 is_empty = FALSE;
4271 }
4272
4273 if (!is_empty) {
4274 status = VXGE_HW_FAIL;
4275 goto vpath_close_exit;
4276 }
4277
4278 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4279
4280 if (vpath->ringh != NULL)
4281 __vxge_hw_ring_delete(vp);
4282
4283 if (vpath->fifoh != NULL)
4284 __vxge_hw_fifo_delete(vp);
4285
4286 if (vpath->stats_block != NULL)
4287 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4288
4289 vfree(vp);
4290
4291 __vxge_hw_vp_terminate(devh, vp_id);
4292
4293 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4294
4295vpath_close_exit:
4296 return status;
4297}
4298
4299/*
4300 * vxge_hw_vpath_reset - Resets vpath
4301 * This function is used to request a reset of vpath
4302 */
4303enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4304{
4305 enum vxge_hw_status status;
4306 u32 vp_id;
4307 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4308
4309 vp_id = vpath->vp_id;
4310
4311 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4312 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4313 goto exit;
4314 }
4315
4316 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4317 if (status == VXGE_HW_OK)
4318 vpath->sw_stats->soft_reset_cnt++;
4319exit:
4320 return status;
4321}
4322
4323/*
4324 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 4325 * This function polls for the vpath reset completion and re-initializes
4326 * the vpath.
4327 */
4328enum vxge_hw_status
4329vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4330{
4331 struct __vxge_hw_virtualpath *vpath = NULL;
4332 enum vxge_hw_status status;
4333 struct __vxge_hw_device *hldev;
4334 u32 vp_id;
4335
4336 vp_id = vp->vpath->vp_id;
4337 vpath = vp->vpath;
4338 hldev = vpath->hldev;
4339
4340 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4341 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4342 goto exit;
4343 }
4344
4345 status = __vxge_hw_vpath_reset_check(vpath);
4346 if (status != VXGE_HW_OK)
4347 goto exit;
4348
4349 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4350 if (status != VXGE_HW_OK)
4351 goto exit;
4352
4353 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4354 if (status != VXGE_HW_OK)
4355 goto exit;
4356
4357 if (vpath->ringh != NULL)
4358 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4359
4360 memset(vpath->hw_stats, 0,
4361 sizeof(struct vxge_hw_vpath_stats_hw_info));
4362
4363 memset(vpath->hw_stats_sav, 0,
4364 sizeof(struct vxge_hw_vpath_stats_hw_info));
4365
4366 writeq(vpath->stats_block->dma_addr,
4367 &vpath->vp_reg->stats_cfg);
4368
4369 status = vxge_hw_vpath_stats_enable(vp);
4370
4371exit:
4372 return status;
4373}
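/*
 * Illustrative recovery flow built from the helpers above and below; the
 * exact sequencing normally lives in the LL driver's reset path, outside
 * this file:
 *
 *     status = vxge_hw_vpath_reset(vp);
 *     if (status != VXGE_HW_OK)
 *             return status;
 *     status = vxge_hw_vpath_recover_from_reset(vp);
 *     if (status != VXGE_HW_OK)
 *             return status;
 *     vxge_hw_vpath_enable(vp);
 */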
4374
4375/*
4376 * vxge_hw_vpath_enable - Enable vpath.
4377 * This routine clears the vpath reset thereby enabling a vpath
4378 * to start forwarding frames and generating interrupts.
4379 */
4380void
4381vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4382{
4383 struct __vxge_hw_device *hldev;
4384 u64 val64;
4385
4386 hldev = vp->vpath->hldev;
4387
4388 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4389 1 << (16 - vp->vpath->vp_id));
4390
4391 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4392 &hldev->common_reg->cmn_rsthdlr_cfg1);
4393}
4394
4395/*
 4396 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 4397 * Enable the DMA vpath statistics. The function should be called to re-enable
 4398 * the adapter to update stats into the host memory.
4399 */
4400enum vxge_hw_status
4401vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4402{
4403 enum vxge_hw_status status = VXGE_HW_OK;
4404 struct __vxge_hw_virtualpath *vpath;
4405
4406 vpath = vp->vpath;
4407
4408 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4409 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4410 goto exit;
4411 }
4412
4413 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4414 sizeof(struct vxge_hw_vpath_stats_hw_info));
4415
4416 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4417exit:
4418 return status;
4419}
4420
4421/*
4422 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4423 * and offset and perform an operation
4424 */
4425enum vxge_hw_status
4426__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4427 u32 operation, u32 offset, u64 *stat)
4428{
4429 u64 val64;
4430 enum vxge_hw_status status = VXGE_HW_OK;
4431 struct vxge_hw_vpath_reg __iomem *vp_reg;
4432
4433 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4434 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4435 goto vpath_stats_access_exit;
4436 }
4437
4438 vp_reg = vpath->vp_reg;
4439
4440 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4441 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4442 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4443
4444 status = __vxge_hw_pio_mem_write64(val64,
4445 &vp_reg->xmac_stats_access_cmd,
4446 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4447 vpath->hldev->config.device_poll_millis);
4448
4449 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4450 *stat = readq(&vp_reg->xmac_stats_access_data);
4451 else
4452 *stat = 0;
4453
4454vpath_stats_access_exit:
4455 return status;
4456}
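/*
 * The access above is a strobe-and-poll handshake: the command is written
 * with VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE set and, as it is used
 * throughout this file, __vxge_hw_pio_mem_write64() waits up to
 * device_poll_millis for the strobe bit to clear before the data register
 * is sampled.  Reading a single counter therefore looks like this sketch
 * (offset units follow the callers below):
 *
 *     u64 stat;
 *     status = __vxge_hw_vpath_stats_access(vpath,
 *                     VXGE_HW_STATS_OP_READ, offset, &stat);
 */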
4457
4458/*
4459 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4460 */
4461enum vxge_hw_status
4462__vxge_hw_vpath_xmac_tx_stats_get(
4463 struct __vxge_hw_virtualpath *vpath,
4464 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4465{
4466 u64 *val64;
4467 int i;
4468 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4469 enum vxge_hw_status status = VXGE_HW_OK;
4470
4471 val64 = (u64 *) vpath_tx_stats;
4472
4473 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4474 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4475 goto exit;
4476 }
4477
4478 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4479 status = __vxge_hw_vpath_stats_access(vpath,
4480 VXGE_HW_STATS_OP_READ,
4481 offset, val64);
4482 if (status != VXGE_HW_OK)
4483 goto exit;
4484 offset++;
4485 val64++;
4486 }
4487exit:
4488 return status;
4489}
4490
4491/*
4492 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4493 */
4494enum vxge_hw_status
4495__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4496 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4497{
4498 u64 *val64;
4499 enum vxge_hw_status status = VXGE_HW_OK;
4500 int i;
4501 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4502 val64 = (u64 *) vpath_rx_stats;
4503
4504 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4505 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4506 goto exit;
4507 }
4508 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4509 status = __vxge_hw_vpath_stats_access(vpath,
4510 VXGE_HW_STATS_OP_READ,
4511 offset >> 3, val64);
4512 if (status != VXGE_HW_OK)
4513 goto exit;
4514
4515 offset += 8;
4516 val64++;
4517 }
4518exit:
4519 return status;
4520}
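/*
 * Note the asymmetry between the two getters above: the TX variant passes
 * a qword index straight through and advances it by one, while the RX
 * variant keeps a byte offset (hence the "offset >> 3" and "offset += 8").
 * Both walk their stats structure one 64-bit counter at a time.
 */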
4521
4522/*
4523 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4524 */
4525enum vxge_hw_status __vxge_hw_vpath_stats_get(
4526 struct __vxge_hw_virtualpath *vpath,
4527 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4528{
4529 u64 val64;
4530 enum vxge_hw_status status = VXGE_HW_OK;
4531 struct vxge_hw_vpath_reg __iomem *vp_reg;
4532
4533 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4534 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4535 goto exit;
4536 }
4537 vp_reg = vpath->vp_reg;
4538
4539 val64 = readq(&vp_reg->vpath_debug_stats0);
4540 hw_stats->ini_num_mwr_sent =
4541 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4542
4543 val64 = readq(&vp_reg->vpath_debug_stats1);
4544 hw_stats->ini_num_mrd_sent =
4545 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4546
4547 val64 = readq(&vp_reg->vpath_debug_stats2);
4548 hw_stats->ini_num_cpl_rcvd =
4549 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4550
4551 val64 = readq(&vp_reg->vpath_debug_stats3);
4552 hw_stats->ini_num_mwr_byte_sent =
4553 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4554
4555 val64 = readq(&vp_reg->vpath_debug_stats4);
4556 hw_stats->ini_num_cpl_byte_rcvd =
4557 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4558
4559 val64 = readq(&vp_reg->vpath_debug_stats5);
4560 hw_stats->wrcrdtarb_xoff =
4561 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4562
4563 val64 = readq(&vp_reg->vpath_debug_stats6);
4564 hw_stats->rdcrdtarb_xoff =
4565 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4566
4567 val64 = readq(&vp_reg->vpath_genstats_count01);
4568 hw_stats->vpath_genstats_count0 =
4569 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4570 val64);
4571
4572 val64 = readq(&vp_reg->vpath_genstats_count01);
4573 hw_stats->vpath_genstats_count1 =
4574 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4575 val64);
4576
4577 val64 = readq(&vp_reg->vpath_genstats_count23);
4578 hw_stats->vpath_genstats_count2 =
4579 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4580 val64);
4581
 4582 val64 = readq(&vp_reg->vpath_genstats_count23);
4583 hw_stats->vpath_genstats_count3 =
4584 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4585 val64);
4586
4587 val64 = readq(&vp_reg->vpath_genstats_count4);
4588 hw_stats->vpath_genstats_count4 =
4589 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4590 val64);
4591
4592 val64 = readq(&vp_reg->vpath_genstats_count5);
4593 hw_stats->vpath_genstats_count5 =
4594 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4595 val64);
4596
4597 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4598 if (status != VXGE_HW_OK)
4599 goto exit;
4600
4601 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4602 if (status != VXGE_HW_OK)
4603 goto exit;
4604
4605 VXGE_HW_VPATH_STATS_PIO_READ(
4606 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4607
4608 hw_stats->prog_event_vnum0 =
4609 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4610
4611 hw_stats->prog_event_vnum1 =
4612 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4613
4614 VXGE_HW_VPATH_STATS_PIO_READ(
4615 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4616
4617 hw_stats->prog_event_vnum2 =
4618 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4619
4620 hw_stats->prog_event_vnum3 =
4621 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4622
4623 val64 = readq(&vp_reg->rx_multi_cast_stats);
4624 hw_stats->rx_multi_cast_frame_discard =
4625 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4626
4627 val64 = readq(&vp_reg->rx_frm_transferred);
4628 hw_stats->rx_frm_transferred =
4629 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4630
4631 val64 = readq(&vp_reg->rxd_returned);
4632 hw_stats->rxd_returned =
4633 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4634
4635 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4636 hw_stats->rx_mpa_len_fail_frms =
4637 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4638 hw_stats->rx_mpa_mrk_fail_frms =
4639 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4640 hw_stats->rx_mpa_crc_fail_frms =
4641 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4642
4643 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4644 hw_stats->rx_permitted_frms =
4645 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4646 hw_stats->rx_vp_reset_discarded_frms =
4647 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4648 hw_stats->rx_wol_frms =
4649 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4650
4651 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4652 hw_stats->tx_vp_reset_discarded_frms =
4653 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4654 val64);
4655exit:
4656 return status;
4657}
4658
4659/*
4660 * __vxge_hw_blockpool_create - Create block pool
4661 */
4662
4663enum vxge_hw_status
4664__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4665 struct __vxge_hw_blockpool *blockpool,
4666 u32 pool_size,
4667 u32 pool_max)
4668{
4669 u32 i;
4670 struct __vxge_hw_blockpool_entry *entry = NULL;
4671 void *memblock;
4672 dma_addr_t dma_addr;
4673 struct pci_dev *dma_handle;
4674 struct pci_dev *acc_handle;
4675 enum vxge_hw_status status = VXGE_HW_OK;
4676
4677 if (blockpool == NULL) {
4678 status = VXGE_HW_FAIL;
4679 goto blockpool_create_exit;
4680 }
4681
4682 blockpool->hldev = hldev;
4683 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4684 blockpool->pool_size = 0;
4685 blockpool->pool_max = pool_max;
4686 blockpool->req_out = 0;
4687
4688 INIT_LIST_HEAD(&blockpool->free_block_list);
4689 INIT_LIST_HEAD(&blockpool->free_entry_list);
4690
4691 for (i = 0; i < pool_size + pool_max; i++) {
4692 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4693 GFP_KERNEL);
4694 if (entry == NULL) {
4695 __vxge_hw_blockpool_destroy(blockpool);
4696 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4697 goto blockpool_create_exit;
4698 }
4699 list_add(&entry->item, &blockpool->free_entry_list);
4700 }
4701
4702 for (i = 0; i < pool_size; i++) {
4703
4704 memblock = vxge_os_dma_malloc(
4705 hldev->pdev,
4706 VXGE_HW_BLOCK_SIZE,
4707 &dma_handle,
4708 &acc_handle);
4709
4710 if (memblock == NULL) {
4711 __vxge_hw_blockpool_destroy(blockpool);
4712 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4713 goto blockpool_create_exit;
4714 }
4715
4716 dma_addr = pci_map_single(hldev->pdev, memblock,
4717 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4718
4719 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4720 dma_addr))) {
4721
4722 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4723 __vxge_hw_blockpool_destroy(blockpool);
4724 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4725 goto blockpool_create_exit;
4726 }
4727
4728 if (!list_empty(&blockpool->free_entry_list))
4729 entry = (struct __vxge_hw_blockpool_entry *)
4730 list_first_entry(&blockpool->free_entry_list,
4731 struct __vxge_hw_blockpool_entry,
4732 item);
4733
4734 if (entry == NULL)
4735 entry =
4736 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4737 GFP_KERNEL);
4738 if (entry != NULL) {
4739 list_del(&entry->item);
4740 entry->length = VXGE_HW_BLOCK_SIZE;
4741 entry->memblock = memblock;
4742 entry->dma_addr = dma_addr;
4743 entry->acc_handle = acc_handle;
4744 entry->dma_handle = dma_handle;
4745 list_add(&entry->item,
4746 &blockpool->free_block_list);
4747 blockpool->pool_size++;
4748 } else {
4749 __vxge_hw_blockpool_destroy(blockpool);
4750 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4751 goto blockpool_create_exit;
4752 }
4753 }
4754
4755blockpool_create_exit:
4756 return status;
4757}
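/*
 * In the pool above, pool_size blocks are allocated and DMA-mapped up
 * front, while pool_max is the level beyond which
 * __vxge_hw_blockpool_blocks_remove() starts trimming the free list; the
 * extra pool_max entry structures allocated first keep later growth from
 * needing kzalloc() calls on the fast path.
 */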
4758
4759/*
4760 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4761 */
4762
4763void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4764{
4765
4766 struct __vxge_hw_device *hldev;
4767 struct list_head *p, *n;
4768 u16 ret;
4769
4770 if (blockpool == NULL) {
4771 ret = 1;
4772 goto exit;
4773 }
4774
4775 hldev = blockpool->hldev;
4776
4777 list_for_each_safe(p, n, &blockpool->free_block_list) {
4778
4779 pci_unmap_single(hldev->pdev,
4780 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4781 ((struct __vxge_hw_blockpool_entry *)p)->length,
4782 PCI_DMA_BIDIRECTIONAL);
4783
4784 vxge_os_dma_free(hldev->pdev,
4785 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4786 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4787
4788 list_del(
4789 &((struct __vxge_hw_blockpool_entry *)p)->item);
4790 kfree(p);
4791 blockpool->pool_size--;
4792 }
4793
4794 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4795 list_del(
4796 &((struct __vxge_hw_blockpool_entry *)p)->item);
4797 kfree((void *)p);
4798 }
4799 ret = 0;
4800exit:
4801 return;
4802}
4803
4804/*
4805 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4806 */
4807static
4808void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4809{
4810 u32 nreq = 0, i;
4811
4812 if ((blockpool->pool_size + blockpool->req_out) <
4813 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4814 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4815 blockpool->req_out += nreq;
4816 }
4817
4818 for (i = 0; i < nreq; i++)
4819 vxge_os_dma_malloc_async(
4820 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4821 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4822}
4823
4824/*
4825 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4826 */
4827static
4828void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4829{
4830 struct list_head *p, *n;
4831
4832 list_for_each_safe(p, n, &blockpool->free_block_list) {
4833
4834 if (blockpool->pool_size < blockpool->pool_max)
4835 break;
4836
4837 pci_unmap_single(
4838 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4839 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4840 ((struct __vxge_hw_blockpool_entry *)p)->length,
4841 PCI_DMA_BIDIRECTIONAL);
4842
4843 vxge_os_dma_free(
4844 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4845 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4846 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4847
4848 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4849
4850 list_add(p, &blockpool->free_entry_list);
4851
4852 blockpool->pool_size--;
4853
4854 }
4855}
4856
4857/*
4858 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4859 * Adds a block to block pool
4860 */
4861void vxge_hw_blockpool_block_add(
4862 struct __vxge_hw_device *devh,
4863 void *block_addr,
4864 u32 length,
4865 struct pci_dev *dma_h,
4866 struct pci_dev *acc_handle)
4867{
4868 struct __vxge_hw_blockpool *blockpool;
4869 struct __vxge_hw_blockpool_entry *entry = NULL;
4870 dma_addr_t dma_addr;
4871 enum vxge_hw_status status = VXGE_HW_OK;
4872 u32 req_out;
4873
4874 blockpool = &devh->block_pool;
4875
4876 if (block_addr == NULL) {
4877 blockpool->req_out--;
4878 status = VXGE_HW_FAIL;
4879 goto exit;
4880 }
4881
4882 dma_addr = pci_map_single(devh->pdev, block_addr, length,
4883 PCI_DMA_BIDIRECTIONAL);
4884
4885 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
4886
4887 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4888 blockpool->req_out--;
4889 status = VXGE_HW_FAIL;
4890 goto exit;
4891 }
4892
4893
4894 if (!list_empty(&blockpool->free_entry_list))
4895 entry = (struct __vxge_hw_blockpool_entry *)
4896 list_first_entry(&blockpool->free_entry_list,
4897 struct __vxge_hw_blockpool_entry,
4898 item);
4899
4900 if (entry == NULL)
4901 entry = (struct __vxge_hw_blockpool_entry *)
4902 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
4903 else
4904 list_del(&entry->item);
4905
4906 if (entry != NULL) {
4907 entry->length = length;
4908 entry->memblock = block_addr;
4909 entry->dma_addr = dma_addr;
4910 entry->acc_handle = acc_handle;
4911 entry->dma_handle = dma_h;
4912 list_add(&entry->item, &blockpool->free_block_list);
4913 blockpool->pool_size++;
4914 status = VXGE_HW_OK;
4915 } else
4916 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4917
4918 blockpool->req_out--;
4919
4920 req_out = blockpool->req_out;
4921exit:
4922 return;
4923}
4924
4925/*
4926 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
4927 * Allocates a block of memory of given size, either from block pool
4928 * or by calling vxge_os_dma_malloc()
4929 */
4930void *
4931__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
4932 struct vxge_hw_mempool_dma *dma_object)
4933{
4934 struct __vxge_hw_blockpool_entry *entry = NULL;
4935 struct __vxge_hw_blockpool *blockpool;
4936 void *memblock = NULL;
4937 enum vxge_hw_status status = VXGE_HW_OK;
4938
4939 blockpool = &devh->block_pool;
4940
4941 if (size != blockpool->block_size) {
4942
4943 memblock = vxge_os_dma_malloc(devh->pdev, size,
4944 &dma_object->handle,
4945 &dma_object->acc_handle);
4946
4947 if (memblock == NULL) {
4948 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4949 goto exit;
4950 }
4951
4952 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
4953 PCI_DMA_BIDIRECTIONAL);
4954
4955 if (unlikely(pci_dma_mapping_error(devh->pdev,
4956 dma_object->addr))) {
4957 vxge_os_dma_free(devh->pdev, memblock,
4958 &dma_object->acc_handle);
4959 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4960 goto exit;
4961 }
4962
4963 } else {
4964
4965 if (!list_empty(&blockpool->free_block_list))
4966 entry = (struct __vxge_hw_blockpool_entry *)
4967 list_first_entry(&blockpool->free_block_list,
4968 struct __vxge_hw_blockpool_entry,
4969 item);
4970
4971 if (entry != NULL) {
4972 list_del(&entry->item);
4973 dma_object->addr = entry->dma_addr;
4974 dma_object->handle = entry->dma_handle;
4975 dma_object->acc_handle = entry->acc_handle;
4976 memblock = entry->memblock;
4977
4978 list_add(&entry->item,
4979 &blockpool->free_entry_list);
4980 blockpool->pool_size--;
4981 }
4982
4983 if (memblock != NULL)
4984 __vxge_hw_blockpool_blocks_add(blockpool);
4985 }
4986exit:
4987 return memblock;
4988}
4989
4990/*
 4991 * __vxge_hw_blockpool_free - Frees the memory allocated with
 4992 * __vxge_hw_blockpool_malloc
4993 */
4994void
4995__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
4996 void *memblock, u32 size,
4997 struct vxge_hw_mempool_dma *dma_object)
4998{
4999 struct __vxge_hw_blockpool_entry *entry = NULL;
5000 struct __vxge_hw_blockpool *blockpool;
5001 enum vxge_hw_status status = VXGE_HW_OK;
5002
5003 blockpool = &devh->block_pool;
5004
5005 if (size != blockpool->block_size) {
5006 pci_unmap_single(devh->pdev, dma_object->addr, size,
5007 PCI_DMA_BIDIRECTIONAL);
5008 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5009 } else {
5010
5011 if (!list_empty(&blockpool->free_entry_list))
5012 entry = (struct __vxge_hw_blockpool_entry *)
5013 list_first_entry(&blockpool->free_entry_list,
5014 struct __vxge_hw_blockpool_entry,
5015 item);
5016
5017 if (entry == NULL)
5018 entry = (struct __vxge_hw_blockpool_entry *)
5019 vmalloc(sizeof(
5020 struct __vxge_hw_blockpool_entry));
5021 else
5022 list_del(&entry->item);
5023
5024 if (entry != NULL) {
5025 entry->length = size;
5026 entry->memblock = memblock;
5027 entry->dma_addr = dma_object->addr;
5028 entry->acc_handle = dma_object->acc_handle;
5029 entry->dma_handle = dma_object->handle;
5030 list_add(&entry->item,
5031 &blockpool->free_block_list);
5032 blockpool->pool_size++;
5033 status = VXGE_HW_OK;
5034 } else
5035 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5036
5037 if (status == VXGE_HW_OK)
5038 __vxge_hw_blockpool_blocks_remove(blockpool);
5039 }
5040
5041 return;
5042}
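/*
 * Illustrative pairing of the two helpers above, as a caller such as the
 * mempool code would use them (sizes other than the pool's block size fall
 * through to vxge_os_dma_malloc()/vxge_os_dma_free()):
 *
 *     struct vxge_hw_mempool_dma dma_object;
 *     void *block;
 *
 *     block = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
 *                     &dma_object);
 *     if (block == NULL)
 *             return VXGE_HW_ERR_OUT_OF_MEMORY;
 *     ...
 *     __vxge_hw_blockpool_free(devh, block, VXGE_HW_BLOCK_SIZE,
 *                     &dma_object);
 */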
5043
5044/*
5045 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 5046 * This function allocates a block of the configured block size from the block pool
5047 */
5048struct __vxge_hw_blockpool_entry *
5049__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5050{
5051 struct __vxge_hw_blockpool_entry *entry = NULL;
5052 struct __vxge_hw_blockpool *blockpool;
5053
5054 blockpool = &devh->block_pool;
5055
5056 if (size == blockpool->block_size) {
5057
5058 if (!list_empty(&blockpool->free_block_list))
5059 entry = (struct __vxge_hw_blockpool_entry *)
5060 list_first_entry(&blockpool->free_block_list,
5061 struct __vxge_hw_blockpool_entry,
5062 item);
5063
5064 if (entry != NULL) {
5065 list_del(&entry->item);
5066 blockpool->pool_size--;
5067 }
5068 }
5069
5070 if (entry != NULL)
5071 __vxge_hw_blockpool_blocks_add(blockpool);
5072
5073 return entry;
5074}
5075
5076/*
5077 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5078 * @devh: Hal device
5079 * @entry: Entry of block to be freed
5080 *
5081 * This function frees a block from block pool
5082 */
5083void
5084__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5085 struct __vxge_hw_blockpool_entry *entry)
5086{
5087 struct __vxge_hw_blockpool *blockpool;
5088
5089 blockpool = &devh->block_pool;
5090
5091 if (entry->length == blockpool->block_size) {
5092 list_add(&entry->item, &blockpool->free_block_list);
5093 blockpool->pool_size++;
5094 }
5095
5096 __vxge_hw_blockpool_blocks_remove(blockpool);
5097
5098 return;
5099}
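/*
 * Entries handed out by __vxge_hw_blockpool_block_allocate() keep their DMA
 * mapping; vxge_hw_vpath_open() above, for example, uses such a block
 * directly as the per-vpath stats area and returns it through
 * __vxge_hw_blockpool_block_free() when the vpath is closed.
 */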