/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. This function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                        &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. This function is to be executed when the
 * vpath is being torn down or quiesced.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * Returns: void
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id%4]);
}
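
/*
 * Illustrative arithmetic (not part of the driver): the register is
 * selected by msix_id % 4 and the bit within it by
 * first_vp_id + msix_id / 4.  E.g. with first_vp_id == 0, msix_id == 9
 * masks bit 2 of set_msix_mask_vect[1]:
 *
 *        vxge_hw_channel_msix_mask(channel, 9);
 */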

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * Returns: void
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}

/**
 * vxge_hw_device_set_intr_type - Update the configuration
 *		with the new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
            (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
            (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
            (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}
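
/*
 * Usage sketch (illustrative): vxge_hw_device_set_intr_type() coerces
 * any unrecognized mode to VXGE_HW_INTR_MODE_IRQLINE, so callers should
 * treat the returned value, not the requested mode, as authoritative:
 *
 *        u32 mode;
 *
 *        mode = vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *        if (mode != VXGE_HW_INTR_MODE_MSIX)
 *                ;        (fall back to legacy INTA handling)
 */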

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. This function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function checks whether the interrupt (relevant on shared IRQ
 * lines) was raised by the device and, for alarm interrupts, processes
 * the per-vpath alarms.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note
 * that in this case the device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the interrupt-processing
 * status and passes the 64-bit general adapter status back via @reason.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not a Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}
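
/*
 * Top-half sketch (illustrative; not the driver's actual ISR, and the
 * deferred-processing details are omitted):
 *
 *        static irqreturn_t my_isr(int irq, void *dev_id)
 *        {
 *                struct __vxge_hw_device *hldev = dev_id;
 *                u64 reason;
 *                enum vxge_hw_status status;
 *
 *                status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *                if (status == VXGE_HW_ERR_WRONG_IRQ)
 *                        return IRQ_NONE;        (shared line, not ours)
 *
 *                vxge_hw_device_mask_all(hldev);
 *                (schedule rx/tx completion processing, which later calls
 *                 vxge_hw_device_clear_tx_rx() and
 *                 vxge_hw_device_unmask_all())
 *                return IRQ_HANDLED;
 *        }
 *
 * my_isr is a hypothetical name; IRQ_NONE/IRQ_HANDLED come from
 * <linux/interrupt.h>.
 */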

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link has been up for a programmable
 * amount of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already marked up, there is nothing to do.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already marked down, there is nothing to do.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
                enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks.crit_err)
                hldev->uld_callbacks.crit_err(hldev, type, vp_id);
out:
        return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
            (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
            (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                         hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status1);
        }
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
        void **tmp_arr;

        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* The idea behind this design: by keeping the free and reserve
         * arrays separate, the irq and non-irq parts are decoupled,
         * i.e. no additional locking is needed when a resource is freed. */

        if (channel->length - channel->free_ptr > 0) {

                tmp_arr = channel->reserve_arr;
                channel->reserve_arr = channel->free_arr;
                channel->free_arr = tmp_arr;
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
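
/*
 * Worked example of the swap above (hypothetical numbers): with
 * length == 4, reserve_ptr == reserve_top == 0 and free_ptr == 1
 * (i.e. three descriptors have been freed into free_arr[1..3]),
 * the swap yields reserve_ptr == 4, reserve_top == 1 and
 * free_ptr == 4, so those three descriptors are handed out again by
 * the pre-decrement of reserve_ptr until it reaches reserve_top.
 */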

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array.
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array.
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array.
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}
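
/*
 * Example of the arithmetic (hypothetical state): with length == 512,
 * reserve_ptr == 100, reserve_top == 0 and free_ptr == 470, the count
 * is (100 - 0) + (512 - 470) == 142, i.e. 100 still unreserved plus
 * 42 already freed but not yet swapped back.
 */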

/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
                                             void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}
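
/*
 * Replenish-loop sketch (illustrative; my_fill_rx_buffer() is a
 * hypothetical driver routine that attaches a buffer and programs the
 * rxd's buffer pointer and control words):
 *
 *        void *rxdh;
 *
 *        while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *                if (my_fill_rx_buffer(ring, rxdh) != 0) {
 *                        vxge_hw_ring_rxd_free(ring, rxdh);
 *                        break;
 *                }
 *                vxge_hw_ring_rxd_post(ring, rxdh);
 *        }
 */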

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare an rxd and post it.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it.
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post.
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	 Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to the Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether it is not the end */
        if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {

                vxge_assert(rxdp->host_control != 0);

                ++ring->cmpl_cnt;
                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it; we don't want to return garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}
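
/*
 * Completion-loop sketch (illustrative): drain completions, validate the
 * transfer code, then recycle each descriptor; the packet hand-off is
 * driver-specific and omitted here:
 *
 *        void *rxdh;
 *        u8 t_code;
 *
 *        while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *                                                        VXGE_HW_OK) {
 *                if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *                                                        VXGE_HW_OK) {
 *                        vxge_hw_ring_rxd_free(ring, rxdh);
 *                        continue;
 *                }
 *                (hand the packet up, then re-fill and re-post the rxd)
 *        }
 */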

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* If the t_code is not supported and is other than 0x5
         * (an unparseable packet, such as one with an unknown IPv6
         * header), drop it.
         */

        if (t_code == 0 || t_code == 5) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > 0xF) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: Fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO.
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

        wmb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
        wmb();
}
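
/*
 * A note on the two wmb() calls above (editorial, based on the code as
 * written): the first barrier orders the control-word write ahead of the
 * txdl_ptr write (presumably the pointer write is what completes the
 * doorbell), and the trailing barrier keeps the doorbell pair ordered
 * ahead of whatever the caller writes next.
 */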

/**
 * vxge_hw_fifo_free_txdl_count_get - Returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of the driver to reserve multiple
 * descriptors for a lengthy (e.g., LSO) transmit operation. A single fifo
 * descriptor carries up to a configured number (fifo.max_frags) of
 * contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                                  void *txdlh, u32 frag_idx,
                                  dma_addr_t dma_pointer, u32 size)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp, *txdp_last;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

        if (frag_idx != 0) {
                txdp->control_0 = txdp->control_1 = 0;
        } else {
                txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
                txdp->control_1 |= fifo->interrupt_type;
                txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
                        fifo->tx_intr_num);
                if (txdl_priv->frags) {
                        txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
                                (txdl_priv->frags - 1);
                        txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
                }
        }

        vxge_assert(frag_idx < txdl_priv->alloc_frags);

        txdp->buffer_pointer = (u64)dma_pointer;
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
        fifo->stats->total_buffers++;
        txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp_last;
        struct vxge_hw_fifo_txd *txdp_first;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

        txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
        txdp_last->control_0 |=
                VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

        __vxge_hw_non_offload_db_post(fifo,
                (u64)(size_t)txdl_priv->dma_addr,
                txdl_priv->frags - 1,
                fifo->no_snoop_bits);

        fifo->stats->total_posts++;
        fifo->stats->common_stats.usage_cnt++;
        if (fifo->stats->common_stats.usage_max <
                        fifo->stats->common_stats.usage_cnt)
                fifo->stats->common_stats.usage_max =
                        fifo->stats->common_stats.usage_cnt;
}
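
/*
 * Transmit-path sketch (illustrative; nfrags, dma_addr[] and len[] stand
 * in for the caller's already-DMA-mapped scatter-gather state):
 *
 *        void *txdlh, *txdl_priv;
 *        u32 i;
 *
 *        if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
 *                                                        VXGE_HW_OK)
 *                return ...;        (fifo full, stop the queue)
 *
 *        for (i = 0; i < nfrags; i++)
 *                vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *                                             dma_addr[i], len[i]);
 *
 *        vxge_hw_fifo_txdl_post(fifo, txdlh);
 */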

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to the Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
        struct __vxge_hw_fifo *fifo, void **txdlh,
        enum vxge_hw_fifo_tcode *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_fifo_txd *txdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        vxge_hw_channel_dtr_try_complete(channel, txdlh);

        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
        if (txdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether host owns it */
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

                vxge_assert(txdp->host_control != 0);

                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

                if (fifo->stats->common_stats.usage_cnt > 0)
                        fifo->stats->common_stats.usage_cnt--;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* no more completions */
        *txdlh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}
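
/*
 * Reclaim-loop sketch (illustrative; buffer unmapping and skb freeing
 * are driver-specific and omitted):
 *
 *        void *txdlh;
 *        enum vxge_hw_fifo_tcode t_code;
 *
 *        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *                                                        VXGE_HW_OK) {
 *                vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *                (unmap buffers, free the skb)
 *                vxge_hw_fifo_txdl_free(fifo, txdlh);
 *        }
 */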

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
                                              void *txdlh,
                                              enum vxge_hw_fifo_tcode t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        if ((t_code & 0x7) > 0x4) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        u32 max_frags;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
                        (struct vxge_hw_fifo_txd *)txdlh);

        max_frags = fifo->config->max_frags;

        vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * See also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
|  | 1407 | enum vxge_hw_status | 
|  | 1408 | vxge_hw_vpath_mac_addr_add( | 
|  | 1409 | struct __vxge_hw_vpath_handle *vp, | 
|  | 1410 | u8 (macaddr)[ETH_ALEN], | 
|  | 1411 | u8 (macaddr_mask)[ETH_ALEN], | 
|  | 1412 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode) | 
|  | 1413 | { | 
|  | 1414 | u32 i; | 
|  | 1415 | u64 data1 = 0ULL; | 
|  | 1416 | u64 data2 = 0ULL; | 
|  | 1417 | enum vxge_hw_status status = VXGE_HW_OK; | 
|  | 1418 |  | 
|  | 1419 | if (vp == NULL) { | 
|  | 1420 | status = VXGE_HW_ERR_INVALID_HANDLE; | 
|  | 1421 | goto exit; | 
|  | 1422 | } | 
|  | 1423 |  | 
|  | 1424 | for (i = 0; i < ETH_ALEN; i++) { | 
|  | 1425 | data1 <<= 8; | 
|  | 1426 | data1 |= (u8)macaddr[i]; | 
|  | 1427 |  | 
|  | 1428 | data2 <<= 8; | 
|  | 1429 | data2 |= (u8)macaddr_mask[i]; | 
|  | 1430 | } | 
|  | 1431 |  | 
|  | 1432 | switch (duplicate_mode) { | 
|  | 1433 | case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE: | 
|  | 1434 | i = 0; | 
|  | 1435 | break; | 
|  | 1436 | case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE: | 
|  | 1437 | i = 1; | 
|  | 1438 | break; | 
|  | 1439 | case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE: | 
|  | 1440 | i = 2; | 
|  | 1441 | break; | 
|  | 1442 | default: | 
|  | 1443 | i = 0; | 
|  | 1444 | break; | 
|  | 1445 | } | 
|  | 1446 |  | 
|  | 1447 | status = __vxge_hw_vpath_rts_table_set(vp, | 
|  | 1448 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, | 
|  | 1449 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | 
|  | 1450 | 0, | 
|  | 1451 | VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), | 
|  | 1452 | VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)| | 
|  | 1453 | VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i)); | 
|  | 1454 | exit: | 
|  | 1455 | return status; | 
|  | 1456 | } | 
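
/*
 * Usage sketch (illustrative): programming a unicast entry for a vpath.
 * The address is made up, and the mask semantics are defined by the
 * Titan user guide; the all-ones mask is purely an example value.
 *
 *	u8 mac[ETH_ALEN]  = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
 *	u8 mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */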

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
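
/*
 * Usage sketch (illustrative): enumerating the whole DA table of a vpath
 * by pairing this call with vxge_hw_vpath_mac_addr_get_next() (defined
 * below). The loop ends when the hardware reports no further entries.
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		... consume mac/mask ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */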

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
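
/*
 * Usage sketch (illustrative): mirroring an 802.1Q registration into the
 * vpath VLAN table; the vlan id value is hypothetical.
 *
 *	status = vxge_hw_vpath_vid_add(vp, 100);
 */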

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the next vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
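
/*
 * Usage sketch (illustrative): walking all VLAN ids programmed on a
 * vpath, e.g. to re-apply them after a reset.
 *
 *	u64 vid;
 *
 *	status = vxge_hw_vpath_vid_get(vp, &vid);
 *	while (status == VXGE_HW_OK) {
 *		... record vid ...
 *		status = vxge_hw_vpath_vid_get_next(vp, &vid);
 *	}
 */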

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
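
/*
 * Usage sketch (illustrative): toggling promiscuous mode from an
 * ndo_set_rx_mode style handler; "flags" stands in for the netdev flags.
 *
 *	if (flags & IFF_PROMISC)
 *		status = vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		status = vxge_hw_vpath_promisc_disable(vp);
 */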

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath,
			u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	/* An all-ones read indicates the device is not responding */
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		    VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			/* Sustained network fault: station fault asserted
			 * without station ok, either as the current state
			 * or as a latched occurrence */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
				       &vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			/* Sustained recovery: station ok asserted without
			 * station fault */
			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)))) {

				sw_stats->error_stats.network_sustained_ok++;

				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
				       &vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
			       &vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
			     ~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
			     ~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
			     ~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
			     ~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
			     ~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
			     ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
			     ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
			     ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
			     ~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
			     ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) &
			     ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) &
			     ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
				       &vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	/* vpath (and hence hldev) may be NULL when called with an invalid
	 * handle; only bump the alarm counter when the device is known */
	if (hldev)
		hldev->stats.sw_dev_err_stats.vpath_alarms++;

	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
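
/*
 * Usage sketch (illustrative): an alarm interrupt handler forwards the
 * event here; passing skip_alarms == 0 lets the routine clear the alarm
 * registers it has serviced.
 *
 *	status = vxge_hw_vpath_alarm_process(vp, 0);
 *	if (status == VXGE_HW_ERR_CRITICAL)
 *		... schedule an adapter reset ...
 */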

/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
enum vxge_hw_status
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 first_vp_id = vpath->hldev->first_vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[1]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[2]);

	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
			(first_vp_id * 4) + tim_msix_id[3]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(first_vp_id * 4) + alarm_msix_id),
	       &vp_reg->interrupt_cfg2);

	/* In one-shot mode each vector must be explicitly re-armed after
	 * it fires; enable the one-shot behaviour for all three vectors */
	if (vpath->hldev->config.intr_mode ==
	    VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}

	return VXGE_HW_OK;
}
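
/*
 * Usage sketch (illustrative): a common layout dedicates one vector to
 * Tx, one to Rx (repeated for the unused TIM slots) and a third to
 * alarms. The vector ids below are hypothetical.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	status = vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */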

/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_unmask(), vxge_hw_vpath_msix_clear()
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id / 4)), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
 */
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	/* One-shot vectors are re-armed through the dedicated clear
	 * register; otherwise clearing the mask bit is sufficient */
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id / 4)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id / 4)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_clear()
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id / 4)), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
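
/*
 * Usage sketch (illustrative): a per-vector handler typically masks its
 * vector on entry, drains completions, then clears the vector (which
 * also re-arms it in one-shot mode) and unmasks it.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... process ring/fifo completions ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */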

/**
 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
 * @vp: Virtual Path handle.
 *
 * The function masks all msix interrupts for the given vpath
 *
 */
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64	tim_int_mask0[4] = {[0 ...3] = 0};
	u32	tim_int_mask1[4] = {[0 ...3] = 0};
	u64	val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Read back to flush the doorbell write */
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
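
/*
 * Usage sketch (illustrative): a NAPI poll routine built on this helper;
 * the ring callback is expected to account the budget. Names other than
 * the vxge_hw_* call are hypothetical.
 *
 *	status = vxge_hw_vpath_poll_rx(ring);
 *	if (status == VXGE_HW_OK)
 *		... all completions consumed; re-enable interrupts ...
 */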

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Buffer through which the completion callback hands completed
 *           skbs back to the caller.
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_rx().
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					  void **skb_ptr)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh,
			t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
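
/*
 * Usage sketch (illustrative): draining Tx completions and releasing any
 * skb handed back through skb_ptr by the fifo callback. The skb handling
 * shown is hypothetical.
 *
 *	struct sk_buff *skb = NULL;
 *
 *	vxge_hw_vpath_poll_tx(fifo, (void **)&skb);
 *	if (skb)
 *		... free/complete the skb(s) collected by the callback ...
 */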